sctputil.c revision 294153
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * a) Redistributions of source code must retain the above copyright notice,
10 *    this list of conditions and the following disclaimer.
11 *
12 * b) Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in
14 *    the documentation and/or other materials provided with the distribution.
15 *
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 *    contributors may be used to endorse or promote products derived
18 *    from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/netinet/sctputil.c 294153 2016-01-16 14:50:43Z tuexen $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/udp.h>
53#include <netinet/udp_var.h>
54#include <sys/proc.h>
55
56
57#ifndef KTR_SCTP
58#define KTR_SCTP KTR_SUBSYS
59#endif
60
61extern struct sctp_cc_functions sctp_cc_functions[];
62extern struct sctp_ss_functions sctp_ss_functions[];
63
64void
65sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
66{
67	struct sctp_cwnd_log sctp_clog;
68
69	sctp_clog.x.sb.stcb = stcb;
70	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
71	if (stcb)
72		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
73	else
74		sctp_clog.x.sb.stcb_sbcc = 0;
75	sctp_clog.x.sb.incr = incr;
76	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
77	    SCTP_LOG_EVENT_SB,
78	    from,
79	    sctp_clog.x.misc.log1,
80	    sctp_clog.x.misc.log2,
81	    sctp_clog.x.misc.log3,
82	    sctp_clog.x.misc.log4);
83}
84
85void
86sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
87{
88	struct sctp_cwnd_log sctp_clog;
89
90	sctp_clog.x.close.inp = (void *)inp;
91	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
92	if (stcb) {
93		sctp_clog.x.close.stcb = (void *)stcb;
94		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
95	} else {
96		sctp_clog.x.close.stcb = 0;
97		sctp_clog.x.close.state = 0;
98	}
99	sctp_clog.x.close.loc = loc;
100	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
101	    SCTP_LOG_EVENT_CLOSE,
102	    0,
103	    sctp_clog.x.misc.log1,
104	    sctp_clog.x.misc.log2,
105	    sctp_clog.x.misc.log3,
106	    sctp_clog.x.misc.log4);
107}
108
109void
110rto_logging(struct sctp_nets *net, int from)
111{
112	struct sctp_cwnd_log sctp_clog;
113
114	memset(&sctp_clog, 0, sizeof(sctp_clog));
115	sctp_clog.x.rto.net = (void *)net;
116	sctp_clog.x.rto.rtt = net->rtt / 1000;
117	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118	    SCTP_LOG_EVENT_RTT,
119	    from,
120	    sctp_clog.x.misc.log1,
121	    sctp_clog.x.misc.log2,
122	    sctp_clog.x.misc.log3,
123	    sctp_clog.x.misc.log4);
124}
125
126void
127sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128{
129	struct sctp_cwnd_log sctp_clog;
130
131	sctp_clog.x.strlog.stcb = stcb;
132	sctp_clog.x.strlog.n_tsn = tsn;
133	sctp_clog.x.strlog.n_sseq = sseq;
134	sctp_clog.x.strlog.e_tsn = 0;
135	sctp_clog.x.strlog.e_sseq = 0;
136	sctp_clog.x.strlog.strm = stream;
137	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138	    SCTP_LOG_EVENT_STRM,
139	    from,
140	    sctp_clog.x.misc.log1,
141	    sctp_clog.x.misc.log2,
142	    sctp_clog.x.misc.log3,
143	    sctp_clog.x.misc.log4);
144}
145
146void
147sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148{
149	struct sctp_cwnd_log sctp_clog;
150
151	sctp_clog.x.nagle.stcb = (void *)stcb;
152	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157	    SCTP_LOG_EVENT_NAGLE,
158	    action,
159	    sctp_clog.x.misc.log1,
160	    sctp_clog.x.misc.log2,
161	    sctp_clog.x.misc.log3,
162	    sctp_clog.x.misc.log4);
163}
164
165void
166sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167{
168	struct sctp_cwnd_log sctp_clog;
169
170	sctp_clog.x.sack.cumack = cumack;
171	sctp_clog.x.sack.oldcumack = old_cumack;
172	sctp_clog.x.sack.tsn = tsn;
173	sctp_clog.x.sack.numGaps = gaps;
174	sctp_clog.x.sack.numDups = dups;
175	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176	    SCTP_LOG_EVENT_SACK,
177	    from,
178	    sctp_clog.x.misc.log1,
179	    sctp_clog.x.misc.log2,
180	    sctp_clog.x.misc.log3,
181	    sctp_clog.x.misc.log4);
182}
183
184void
185sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186{
187	struct sctp_cwnd_log sctp_clog;
188
189	memset(&sctp_clog, 0, sizeof(sctp_clog));
190	sctp_clog.x.map.base = map;
191	sctp_clog.x.map.cum = cum;
192	sctp_clog.x.map.high = high;
193	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194	    SCTP_LOG_EVENT_MAP,
195	    from,
196	    sctp_clog.x.misc.log1,
197	    sctp_clog.x.misc.log2,
198	    sctp_clog.x.misc.log3,
199	    sctp_clog.x.misc.log4);
200}
201
202void
203sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
204{
205	struct sctp_cwnd_log sctp_clog;
206
207	memset(&sctp_clog, 0, sizeof(sctp_clog));
208	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210	sctp_clog.x.fr.tsn = tsn;
211	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212	    SCTP_LOG_EVENT_FR,
213	    from,
214	    sctp_clog.x.misc.log1,
215	    sctp_clog.x.misc.log2,
216	    sctp_clog.x.misc.log3,
217	    sctp_clog.x.misc.log4);
218}
219
220#ifdef SCTP_MBUF_LOGGING
221void
222sctp_log_mb(struct mbuf *m, int from)
223{
224	struct sctp_cwnd_log sctp_clog;
225
226	sctp_clog.x.mb.mp = m;
227	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230	if (SCTP_BUF_IS_EXTENDED(m)) {
231		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233	} else {
234		sctp_clog.x.mb.ext = 0;
235		sctp_clog.x.mb.refcnt = 0;
236	}
237	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238	    SCTP_LOG_EVENT_MBUF,
239	    from,
240	    sctp_clog.x.misc.log1,
241	    sctp_clog.x.misc.log2,
242	    sctp_clog.x.misc.log3,
243	    sctp_clog.x.misc.log4);
244}
245
246void
247sctp_log_mbc(struct mbuf *m, int from)
248{
249	struct mbuf *mat;
250
251	for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) {
252		sctp_log_mb(mat, from);
253	}
254}
255
256#endif
257
258void
259sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
260{
261	struct sctp_cwnd_log sctp_clog;
262
263	if (control == NULL) {
264		SCTP_PRINTF("Gak log of NULL?\n");
265		return;
266	}
267	sctp_clog.x.strlog.stcb = control->stcb;
268	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
269	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
270	sctp_clog.x.strlog.strm = control->sinfo_stream;
271	if (poschk != NULL) {
272		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
273		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
274	} else {
275		sctp_clog.x.strlog.e_tsn = 0;
276		sctp_clog.x.strlog.e_sseq = 0;
277	}
278	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
279	    SCTP_LOG_EVENT_STRM,
280	    from,
281	    sctp_clog.x.misc.log1,
282	    sctp_clog.x.misc.log2,
283	    sctp_clog.x.misc.log3,
284	    sctp_clog.x.misc.log4);
285}
286
287void
288sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289{
290	struct sctp_cwnd_log sctp_clog;
291
292	sctp_clog.x.cwnd.net = net;
293	if (stcb->asoc.send_queue_cnt > 255)
294		sctp_clog.x.cwnd.cnt_in_send = 255;
295	else
296		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297	if (stcb->asoc.stream_queue_cnt > 255)
298		sctp_clog.x.cwnd.cnt_in_str = 255;
299	else
300		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301
302	if (net) {
303		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304		sctp_clog.x.cwnd.inflight = net->flight_size;
305		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308	}
309	if (SCTP_CWNDLOG_PRESEND == from) {
310		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311	}
312	sctp_clog.x.cwnd.cwnd_augment = augment;
313	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314	    SCTP_LOG_EVENT_CWND,
315	    from,
316	    sctp_clog.x.misc.log1,
317	    sctp_clog.x.misc.log2,
318	    sctp_clog.x.misc.log3,
319	    sctp_clog.x.misc.log4);
320}
321
/*
 * Trace the ownership state of the SCTP-related locks (tcb, inp,
 * create, global info, socket and socket-buffer mutexes) for debugging
 * lock ordering; unknown/unavailable locks log SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — looks like sock_lock was meant to use the
		 * socket's own lock; confirm against other FreeBSD versions.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
365
366void
367sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
368{
369	struct sctp_cwnd_log sctp_clog;
370
371	memset(&sctp_clog, 0, sizeof(sctp_clog));
372	sctp_clog.x.cwnd.net = net;
373	sctp_clog.x.cwnd.cwnd_new_value = error;
374	sctp_clog.x.cwnd.inflight = net->flight_size;
375	sctp_clog.x.cwnd.cwnd_augment = burst;
376	if (stcb->asoc.send_queue_cnt > 255)
377		sctp_clog.x.cwnd.cnt_in_send = 255;
378	else
379		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
380	if (stcb->asoc.stream_queue_cnt > 255)
381		sctp_clog.x.cwnd.cnt_in_str = 255;
382	else
383		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
384	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
385	    SCTP_LOG_EVENT_MAXBURST,
386	    from,
387	    sctp_clog.x.misc.log1,
388	    sctp_clog.x.misc.log2,
389	    sctp_clog.x.misc.log3,
390	    sctp_clog.x.misc.log4);
391}
392
393void
394sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
395{
396	struct sctp_cwnd_log sctp_clog;
397
398	sctp_clog.x.rwnd.rwnd = peers_rwnd;
399	sctp_clog.x.rwnd.send_size = snd_size;
400	sctp_clog.x.rwnd.overhead = overhead;
401	sctp_clog.x.rwnd.new_rwnd = 0;
402	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
403	    SCTP_LOG_EVENT_RWND,
404	    from,
405	    sctp_clog.x.misc.log1,
406	    sctp_clog.x.misc.log2,
407	    sctp_clog.x.misc.log3,
408	    sctp_clog.x.misc.log4);
409}
410
411void
412sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
413{
414	struct sctp_cwnd_log sctp_clog;
415
416	sctp_clog.x.rwnd.rwnd = peers_rwnd;
417	sctp_clog.x.rwnd.send_size = flight_size;
418	sctp_clog.x.rwnd.overhead = overhead;
419	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
420	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
421	    SCTP_LOG_EVENT_RWND,
422	    from,
423	    sctp_clog.x.misc.log1,
424	    sctp_clog.x.misc.log2,
425	    sctp_clog.x.misc.log3,
426	    sctp_clog.x.misc.log4);
427}
428
429#ifdef SCTP_MBCNT_LOGGING
430static void
431sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
432{
433	struct sctp_cwnd_log sctp_clog;
434
435	sctp_clog.x.mbcnt.total_queue_size = total_oq;
436	sctp_clog.x.mbcnt.size_change = book;
437	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
438	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
439	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440	    SCTP_LOG_EVENT_MBCNT,
441	    from,
442	    sctp_clog.x.misc.log1,
443	    sctp_clog.x.misc.log2,
444	    sctp_clog.x.misc.log3,
445	    sctp_clog.x.misc.log4);
446}
447
448#endif
449
450void
451sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452{
453	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454	    SCTP_LOG_MISC_EVENT,
455	    from,
456	    a, b, c, d);
457}
458
459void
460sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
461{
462	struct sctp_cwnd_log sctp_clog;
463
464	sctp_clog.x.wake.stcb = (void *)stcb;
465	sctp_clog.x.wake.wake_cnt = wake_cnt;
466	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469
470	if (stcb->asoc.stream_queue_cnt < 0xff)
471		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472	else
473		sctp_clog.x.wake.stream_qcnt = 0xff;
474
475	if (stcb->asoc.chunks_on_out_queue < 0xff)
476		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477	else
478		sctp_clog.x.wake.chunks_on_oque = 0xff;
479
480	sctp_clog.x.wake.sctpflags = 0;
481	/* set in the defered mode stuff */
482	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483		sctp_clog.x.wake.sctpflags |= 1;
484	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485		sctp_clog.x.wake.sctpflags |= 2;
486	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487		sctp_clog.x.wake.sctpflags |= 4;
488	/* what about the sb */
489	if (stcb->sctp_socket) {
490		struct socket *so = stcb->sctp_socket;
491
492		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493	} else {
494		sctp_clog.x.wake.sbflags = 0xff;
495	}
496	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497	    SCTP_LOG_EVENT_WAKE,
498	    from,
499	    sctp_clog.x.misc.log1,
500	    sctp_clog.x.misc.log2,
501	    sctp_clog.x.misc.log3,
502	    sctp_clog.x.misc.log4);
503}
504
505void
506sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
507{
508	struct sctp_cwnd_log sctp_clog;
509
510	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
511	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
512	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
513	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
514	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
515	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
516	sctp_clog.x.blk.sndlen = sendlen;
517	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
518	    SCTP_LOG_EVENT_BLOCK,
519	    from,
520	    sctp_clog.x.misc.log1,
521	    sctp_clog.x.misc.log2,
522	    sctp_clog.x.misc.log3,
523	    sctp_clog.x.misc.log4);
524}
525
/*
 * Stub for copying out the statistics log; always succeeds without
 * filling anything (the trace buffer is read via ktrdump instead).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
532
533#ifdef SCTP_AUDITING_ENABLED
/* Circular buffer of (event, detail) byte pairs recorded by the auditing code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps back to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
536
537static
538void
539sctp_print_audit_report(void)
540{
541	int i;
542	int cnt;
543
544	cnt = 0;
545	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
546		if ((sctp_audit_data[i][0] == 0xe0) &&
547		    (sctp_audit_data[i][1] == 0x01)) {
548			cnt = 0;
549			SCTP_PRINTF("\n");
550		} else if (sctp_audit_data[i][0] == 0xf0) {
551			cnt = 0;
552			SCTP_PRINTF("\n");
553		} else if ((sctp_audit_data[i][0] == 0xc0) &&
554		    (sctp_audit_data[i][1] == 0x01)) {
555			SCTP_PRINTF("\n");
556			cnt = 0;
557		}
558		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
559		    (uint32_t) sctp_audit_data[i][1]);
560		cnt++;
561		if ((cnt % 14) == 0)
562			SCTP_PRINTF("\n");
563	}
564	for (i = 0; i < sctp_audit_indx; i++) {
565		if ((sctp_audit_data[i][0] == 0xe0) &&
566		    (sctp_audit_data[i][1] == 0x01)) {
567			cnt = 0;
568			SCTP_PRINTF("\n");
569		} else if (sctp_audit_data[i][0] == 0xf0) {
570			cnt = 0;
571			SCTP_PRINTF("\n");
572		} else if ((sctp_audit_data[i][0] == 0xc0) &&
573		    (sctp_audit_data[i][1] == 0x01)) {
574			SCTP_PRINTF("\n");
575			cnt = 0;
576		}
577		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
578		    (uint32_t) sctp_audit_data[i][1]);
579		cnt++;
580		if ((cnt % 14) == 0)
581			SCTP_PRINTF("\n");
582	}
583	SCTP_PRINTF("\n");
584}
585
/*
 * Cross-check the association's retransmit count, total flight and
 * flight count against the sent queue and per-net flight sizes,
 * recording each step into the audit buffer. Any mismatch is printed,
 * corrected in place, and triggers a full audit-report dump.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks entry into the audit; low byte records the caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: bailed out, no inp. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: bailed out, no stcb. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the current retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes/chunks from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch; fix and re-log. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total flight mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Second pass: sum per-destination flight sizes and compare. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sum disagrees with asoc total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
715
716void
717sctp_audit_log(uint8_t ev, uint8_t fd)
718{
719
720	sctp_audit_data[sctp_audit_indx][0] = ev;
721	sctp_audit_data[sctp_audit_indx][1] = fd;
722	sctp_audit_indx++;
723	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
724		sctp_audit_indx = 0;
725	}
726}
727
728#endif
729
730/*
731 * sctp_stop_timers_for_shutdown() should be called
732 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
733 * state to make sure that all timers are stopped.
734 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Stop the association-wide timers... */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* ...and the per-destination PMTU and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
753
754/*
755 * a list of sizes based on typical mtu's, used only if next hop size not
756 * returned.
757 */
/*
 * Must stay sorted in ascending order: sctp_get_prev_mtu() and
 * sctp_get_next_mtu() scan this table linearly and rely on it.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
778
779/*
780 * Return the largest MTU smaller than val. If there is no
781 * entry, just return val.
782 */
783uint32_t
784sctp_get_prev_mtu(uint32_t val)
785{
786	uint32_t i;
787
788	if (val <= sctp_mtu_sizes[0]) {
789		return (val);
790	}
791	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792		if (val <= sctp_mtu_sizes[i]) {
793			break;
794		}
795	}
796	return (sctp_mtu_sizes[i - 1]);
797}
798
799/*
800 * Return the smallest MTU larger than val. If there is no
801 * entry, just return val.
802 */
803uint32_t
804sctp_get_next_mtu(uint32_t val)
805{
806	/* select another MTU that is just bigger than this one */
807	uint32_t i;
808
809	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810		if (val < sctp_mtu_sizes[i]) {
811			return (sctp_mtu_sizes[i]);
812		}
813	}
814	return (val);
815}
816
/*
 * Refill the PCB's random_store by HMACing the endpoint's random
 * numbers with its counter, then reset the read position to 0.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill produces fresh output. */
	m->random_counter++;
}
835
836uint32_t
837sctp_select_initial_TSN(struct sctp_pcb *inp)
838{
839	/*
840	 * A true implementation should use random selection process to get
841	 * the initial stream sequence number, using RFC1750 as a good
842	 * guideline
843	 */
844	uint32_t x, *xp;
845	uint8_t *p;
846	int store_at, new_store;
847
848	if (inp->initial_sequence_debug != 0) {
849		uint32_t ret;
850
851		ret = inp->initial_sequence_debug;
852		inp->initial_sequence_debug++;
853		return (ret);
854	}
855retry:
856	store_at = inp->store_at;
857	new_store = store_at + sizeof(uint32_t);
858	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859		new_store = 0;
860	}
861	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862		goto retry;
863	}
864	if (new_store == 0) {
865		/* Refill the random store */
866		sctp_fill_random_store(inp);
867	}
868	p = &inp->random_store[store_at];
869	xp = (uint32_t *) p;
870	x = *xp;
871	return (x);
872}
873
874uint32_t
875sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
876{
877	uint32_t x;
878	struct timeval now;
879
880	if (check) {
881		(void)SCTP_GETTIME_TIMEVAL(&now);
882	}
883	for (;;) {
884		x = sctp_select_initial_TSN(&inp->sctp_ep);
885		if (x == 0) {
886			/* we never use 0 */
887			continue;
888		}
889		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
890			break;
891		}
892	}
893	return (x);
894}
895
896int32_t
897sctp_map_assoc_state(int kernel_state)
898{
899	int32_t user_state;
900
901	if (kernel_state & SCTP_STATE_WAS_ABORTED) {
902		user_state = SCTP_CLOSED;
903	} else if (kernel_state & SCTP_STATE_SHUTDOWN_PENDING) {
904		user_state = SCTP_SHUTDOWN_PENDING;
905	} else {
906		switch (kernel_state & SCTP_STATE_MASK) {
907		case SCTP_STATE_EMPTY:
908			user_state = SCTP_CLOSED;
909			break;
910		case SCTP_STATE_INUSE:
911			user_state = SCTP_CLOSED;
912			break;
913		case SCTP_STATE_COOKIE_WAIT:
914			user_state = SCTP_COOKIE_WAIT;
915			break;
916		case SCTP_STATE_COOKIE_ECHOED:
917			user_state = SCTP_COOKIE_ECHOED;
918			break;
919		case SCTP_STATE_OPEN:
920			user_state = SCTP_ESTABLISHED;
921			break;
922		case SCTP_STATE_SHUTDOWN_SENT:
923			user_state = SCTP_SHUTDOWN_SENT;
924			break;
925		case SCTP_STATE_SHUTDOWN_RECEIVED:
926			user_state = SCTP_SHUTDOWN_RECEIVED;
927			break;
928		case SCTP_STATE_SHUTDOWN_ACK_SENT:
929			user_state = SCTP_SHUTDOWN_ACK_SENT;
930			break;
931		default:
932			user_state = SCTP_CLOSED;
933			break;
934		}
935	}
936	return (user_state);
937}
938
939int
940sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
941    uint32_t override_tag, uint32_t vrf_id)
942{
943	struct sctp_association *asoc;
944
945	/*
946	 * Anything set to zero is taken care of by the allocation routine's
947	 * bzero
948	 */
949
950	/*
951	 * Up front select what scoping to apply on addresses I tell my peer
952	 * Not sure what to do with these right now, we will need to come up
953	 * with a way to set them. We may need to pass them through from the
954	 * caller in the sctp_aloc_assoc() function.
955	 */
956	int i;
957
958#if defined(SCTP_DETAILED_STR_STATS)
959	int j;
960
961#endif
962
963	asoc = &stcb->asoc;
964	/* init all variables to a known value. */
965	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
966	asoc->max_burst = inp->sctp_ep.max_burst;
967	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
968	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
969	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
970	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
971	asoc->ecn_supported = inp->ecn_supported;
972	asoc->prsctp_supported = inp->prsctp_supported;
973	asoc->auth_supported = inp->auth_supported;
974	asoc->asconf_supported = inp->asconf_supported;
975	asoc->reconfig_supported = inp->reconfig_supported;
976	asoc->nrsack_supported = inp->nrsack_supported;
977	asoc->pktdrop_supported = inp->pktdrop_supported;
978	asoc->sctp_cmt_pf = (uint8_t) 0;
979	asoc->sctp_frag_point = inp->sctp_frag_point;
980	asoc->sctp_features = inp->sctp_features;
981	asoc->default_dscp = inp->sctp_ep.default_dscp;
982	asoc->max_cwnd = inp->max_cwnd;
983#ifdef INET6
984	if (inp->sctp_ep.default_flowlabel) {
985		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
986	} else {
987		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
988			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
989			asoc->default_flowlabel &= 0x000fffff;
990			asoc->default_flowlabel |= 0x80000000;
991		} else {
992			asoc->default_flowlabel = 0;
993		}
994	}
995#endif
996	asoc->sb_send_resv = 0;
997	if (override_tag) {
998		asoc->my_vtag = override_tag;
999	} else {
1000		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
1001	}
1002	/* Get the nonce tags */
1003	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1004	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
1005	asoc->vrf_id = vrf_id;
1006
1007#ifdef SCTP_ASOCLOG_OF_TSNS
1008	asoc->tsn_in_at = 0;
1009	asoc->tsn_out_at = 0;
1010	asoc->tsn_in_wrapped = 0;
1011	asoc->tsn_out_wrapped = 0;
1012	asoc->cumack_log_at = 0;
1013	asoc->cumack_log_atsnt = 0;
1014#endif
1015#ifdef SCTP_FS_SPEC_LOG
1016	asoc->fs_index = 0;
1017#endif
1018	asoc->refcnt = 0;
1019	asoc->assoc_up_sent = 0;
1020	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
1021	    sctp_select_initial_TSN(&inp->sctp_ep);
1022	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
1023	/* we are optimisitic here */
1024	asoc->peer_supports_nat = 0;
1025	asoc->sent_queue_retran_cnt = 0;
1026
1027	/* for CMT */
1028	asoc->last_net_cmt_send_started = NULL;
1029
1030	/* This will need to be adjusted */
1031	asoc->last_acked_seq = asoc->init_seq_number - 1;
1032	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
1033	asoc->asconf_seq_in = asoc->last_acked_seq;
1034
1035	/* here we are different, we hold the next one we expect */
1036	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
1037
1038	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
1039	asoc->initial_rto = inp->sctp_ep.initial_rto;
1040
1041	asoc->max_init_times = inp->sctp_ep.max_init_times;
1042	asoc->max_send_times = inp->sctp_ep.max_send_times;
1043	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
1044	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
1045	asoc->free_chunk_cnt = 0;
1046
1047	asoc->iam_blocking = 0;
1048	asoc->context = inp->sctp_context;
1049	asoc->local_strreset_support = inp->local_strreset_support;
1050	asoc->def_send = inp->def_send;
1051	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1052	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
1053	asoc->pr_sctp_cnt = 0;
1054	asoc->total_output_queue_size = 0;
1055
1056	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1057		asoc->scope.ipv6_addr_legal = 1;
1058		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1059			asoc->scope.ipv4_addr_legal = 1;
1060		} else {
1061			asoc->scope.ipv4_addr_legal = 0;
1062		}
1063	} else {
1064		asoc->scope.ipv6_addr_legal = 0;
1065		asoc->scope.ipv4_addr_legal = 1;
1066	}
1067
1068	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
1069	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);
1070
1071	asoc->smallest_mtu = inp->sctp_frag_point;
1072	asoc->minrto = inp->sctp_ep.sctp_minrto;
1073	asoc->maxrto = inp->sctp_ep.sctp_maxrto;
1074
1075	asoc->locked_on_sending = NULL;
1076	asoc->stream_locked_on = 0;
1077	asoc->ecn_echo_cnt_onq = 0;
1078	asoc->stream_locked = 0;
1079
1080	asoc->send_sack = 1;
1081
1082	LIST_INIT(&asoc->sctp_restricted_addrs);
1083
1084	TAILQ_INIT(&asoc->nets);
1085	TAILQ_INIT(&asoc->pending_reply_queue);
1086	TAILQ_INIT(&asoc->asconf_ack_sent);
1087	/* Setup to fill the hb random cache at first HB */
1088	asoc->hb_random_idx = 4;
1089
1090	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;
1091
1092	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
1093	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];
1094
1095	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
1096	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];
1097
1098	/*
1099	 * Now the stream parameters, here we allocate space for all streams
1100	 * that we request by default.
1101	 */
1102	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1103	    inp->sctp_ep.pre_open_stream_count;
1104	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1105	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1106	    SCTP_M_STRMO);
1107	if (asoc->strmout == NULL) {
1108		/* big trouble no memory */
1109		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1110		return (ENOMEM);
1111	}
1112	for (i = 0; i < asoc->streamoutcnt; i++) {
1113		/*
1114		 * inbound side must be set to 0xffff, also NOTE when we get
1115		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1116		 * count (streamoutcnt) but first check if we sent to any of
1117		 * the upper streams that were dropped (if some were). Those
1118		 * that were dropped must be notified to the upper layer as
1119		 * failed to send.
1120		 */
1121		asoc->strmout[i].next_sequence_send = 0x0;
1122		TAILQ_INIT(&asoc->strmout[i].outqueue);
1123		asoc->strmout[i].chunks_on_queues = 0;
1124#if defined(SCTP_DETAILED_STR_STATS)
1125		for (j = 0; j < SCTP_PR_SCTP_MAX + 1; j++) {
1126			asoc->strmout[i].abandoned_sent[j] = 0;
1127			asoc->strmout[i].abandoned_unsent[j] = 0;
1128		}
1129#else
1130		asoc->strmout[i].abandoned_sent[0] = 0;
1131		asoc->strmout[i].abandoned_unsent[0] = 0;
1132#endif
1133		asoc->strmout[i].stream_no = i;
1134		asoc->strmout[i].last_msg_incomplete = 0;
1135		asoc->strmout[i].state = SCTP_STREAM_OPENING;
1136		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1137	}
1138	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1139
1140	/* Now the mapping array */
1141	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1142	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1143	    SCTP_M_MAP);
1144	if (asoc->mapping_array == NULL) {
1145		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147		return (ENOMEM);
1148	}
1149	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1150	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1151	    SCTP_M_MAP);
1152	if (asoc->nr_mapping_array == NULL) {
1153		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1155		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1156		return (ENOMEM);
1157	}
1158	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1159
1160	/* Now the init of the other outqueues */
1161	TAILQ_INIT(&asoc->free_chunks);
1162	TAILQ_INIT(&asoc->control_send_queue);
1163	TAILQ_INIT(&asoc->asconf_send_queue);
1164	TAILQ_INIT(&asoc->send_queue);
1165	TAILQ_INIT(&asoc->sent_queue);
1166	TAILQ_INIT(&asoc->reasmqueue);
1167	TAILQ_INIT(&asoc->resetHead);
1168	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
1169	TAILQ_INIT(&asoc->asconf_queue);
1170	/* authentication fields */
1171	asoc->authinfo.random = NULL;
1172	asoc->authinfo.active_keyid = 0;
1173	asoc->authinfo.assoc_key = NULL;
1174	asoc->authinfo.assoc_keyid = 0;
1175	asoc->authinfo.recv_key = NULL;
1176	asoc->authinfo.recv_keyid = 0;
1177	LIST_INIT(&asoc->shared_keys);
1178	asoc->marked_retrans = 0;
1179	asoc->port = inp->sctp_ep.port;
1180	asoc->timoinit = 0;
1181	asoc->timodata = 0;
1182	asoc->timosack = 0;
1183	asoc->timoshutdown = 0;
1184	asoc->timoheartbeat = 0;
1185	asoc->timocookie = 0;
1186	asoc->timoshutdownack = 0;
1187	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1188	asoc->discontinuity_time = asoc->start_time;
1189	for (i = 0; i < SCTP_PR_SCTP_MAX + 1; i++) {
1190		asoc->abandoned_unsent[i] = 0;
1191		asoc->abandoned_sent[i] = 0;
1192	}
1193	/*
1194	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1195	 * freed later when the association is freed.
1196	 */
1197	return (0);
1198}
1199
1200void
1201sctp_print_mapping_array(struct sctp_association *asoc)
1202{
1203	unsigned int i, limit;
1204
1205	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1206	    asoc->mapping_array_size,
1207	    asoc->mapping_array_base_tsn,
1208	    asoc->cumulative_tsn,
1209	    asoc->highest_tsn_inside_map,
1210	    asoc->highest_tsn_inside_nr_map);
1211	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1212		if (asoc->mapping_array[limit - 1] != 0) {
1213			break;
1214		}
1215	}
1216	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1217	for (i = 0; i < limit; i++) {
1218		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1219	}
1220	if (limit % 16)
1221		SCTP_PRINTF("\n");
1222	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1223		if (asoc->nr_mapping_array[limit - 1]) {
1224			break;
1225		}
1226	}
1227	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1228	for (i = 0; i < limit; i++) {
1229		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1230	}
1231	if (limit % 16)
1232		SCTP_PRINTF("\n");
1233}
1234
1235int
1236sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1237{
1238	/* mapping array needs to grow */
1239	uint8_t *new_array1, *new_array2;
1240	uint32_t new_size;
1241
1242	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1243	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1244	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1245	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1246		/* can't get more, forget it */
1247		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1248		if (new_array1) {
1249			SCTP_FREE(new_array1, SCTP_M_MAP);
1250		}
1251		if (new_array2) {
1252			SCTP_FREE(new_array2, SCTP_M_MAP);
1253		}
1254		return (-1);
1255	}
1256	memset(new_array1, 0, new_size);
1257	memset(new_array2, 0, new_size);
1258	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1259	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1260	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1261	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1262	asoc->mapping_array = new_array1;
1263	asoc->nr_mapping_array = new_array2;
1264	asoc->mapping_array_size = new_size;
1265	return (0);
1266}
1267
1268
/*
 * Core of the association iterator.  Walks the global endpoint list
 * starting at it->inp, skipping endpoints whose flags/features do not
 * match the iterator's selection, and for each matching association
 * (state-filtered by it->asoc_state) calls it->function_assoc.  Per-
 * endpoint callbacks (function_inp / function_inp_end) run before and
 * after the associations of that endpoint.  When the walk finishes,
 * function_atend is invoked and the iterator itself is freed.
 *
 * Locking: runs with the INP_INFO read lock and the ITERATOR lock held;
 * both are dropped periodically (every SCTP_ITERATOR_MAX_AT_ONCE
 * associations) to let other threads make progress, after which stop
 * requests posted in sctp_it_ctl.iterator_flags are honored.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator is single-use; free it here. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked (above). */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Unlock the old endpoint only after advancing past it. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* A non-zero inp_skip from function_inp skips this endpoint. */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcount on the stcb and a ref on the inp
			 * so neither disappears while all locks are dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* Someone may have asked us to stop while unlocked. */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1416
1417void
1418sctp_iterator_worker(void)
1419{
1420	struct sctp_iterator *it, *nit;
1421
1422	/* This function is called with the WQ lock in place */
1423
1424	sctp_it_ctl.iterator_running = 1;
1425	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1426		sctp_it_ctl.cur_it = it;
1427		/* now lets work on this one */
1428		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1429		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1430		CURVNET_SET(it->vn);
1431		sctp_iterator_work(it);
1432		sctp_it_ctl.cur_it = NULL;
1433		CURVNET_RESTORE();
1434		SCTP_IPI_ITERATOR_WQ_LOCK();
1435		/* sa_ignore FREED_MEMORY */
1436	}
1437	sctp_it_ctl.iterator_running = 0;
1438	return;
1439}
1440
1441
1442static void
1443sctp_handle_addr_wq(void)
1444{
1445	/* deal with the ADDR wq from the rtsock calls */
1446	struct sctp_laddr *wi, *nwi;
1447	struct sctp_asconf_iterator *asc;
1448
1449	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1450	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1451	if (asc == NULL) {
1452		/* Try later, no memory */
1453		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1454		    (struct sctp_inpcb *)NULL,
1455		    (struct sctp_tcb *)NULL,
1456		    (struct sctp_nets *)NULL);
1457		return;
1458	}
1459	LIST_INIT(&asc->list_of_work);
1460	asc->cnt = 0;
1461
1462	SCTP_WQ_ADDR_LOCK();
1463	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1464		LIST_REMOVE(wi, sctp_nxt_addr);
1465		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1466		asc->cnt++;
1467	}
1468	SCTP_WQ_ADDR_UNLOCK();
1469
1470	if (asc->cnt == 0) {
1471		SCTP_FREE(asc, SCTP_M_ASC_IT);
1472	} else {
1473		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1474		    sctp_asconf_iterator_stcb,
1475		    NULL,	/* No ep end for boundall */
1476		    SCTP_PCB_FLAGS_BOUNDALL,
1477		    SCTP_PCB_ANY_FEATURES,
1478		    SCTP_ASOC_ANY_STATE,
1479		    (void *)asc, 0,
1480		    sctp_asconf_iterator_end, NULL, 0);
1481	}
1482}
1483
/*
 * Central callout handler for every SCTP timer type.  "t" is the
 * struct sctp_timer embedded in the endpoint/association/net that
 * scheduled it.  The function first validates the timer (stale self
 * pointer, invalid type, missing inp, dead association), takes an inp
 * reference and the TCB lock where an association is involved, and then
 * dispatches on tmr->type.  Most cases bump a statistics counter, call
 * the per-type timer routine, and trigger chunk output; the per-type
 * routines return non-zero when they freed the TCB, in which case we
 * must not touch stcb again (goto out_decr).
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;
	struct mbuf *op_err;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/* stopped_from is a debugging breadcrumb recording how far we got. */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (inp) {
		/* Hold the inp so it cannot be freed under us. */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* Socket already gone and this timer type needs one. */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Hold the association while we check its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* Drop the temporary refcount now that the TCB is locked. */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm only if heartbeats are still enabled on this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/* COOKIE-ECHO retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Rotate the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Guard expired: abort the association outright. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Shutdown guard timer expired");
		sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    tmr->type);
	CURVNET_RESTORE();
}
1928
1929void
1930sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1931    struct sctp_nets *net)
1932{
1933	uint32_t to_ticks;
1934	struct sctp_timer *tmr;
1935
1936	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1937		return;
1938
1939	tmr = NULL;
1940	if (stcb) {
1941		SCTP_TCB_LOCK_ASSERT(stcb);
1942	}
1943	switch (t_type) {
1944	case SCTP_TIMER_TYPE_ZERO_COPY:
1945		tmr = &inp->sctp_ep.zero_copy_timer;
1946		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1947		break;
1948	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1949		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1950		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1951		break;
1952	case SCTP_TIMER_TYPE_ADDR_WQ:
1953		/* Only 1 tick away :-) */
1954		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1955		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1956		break;
1957	case SCTP_TIMER_TYPE_SEND:
1958		/* Here we use the RTO timer */
1959		{
1960			int rto_val;
1961
1962			if ((stcb == NULL) || (net == NULL)) {
1963				return;
1964			}
1965			tmr = &net->rxt_timer;
1966			if (net->RTO == 0) {
1967				rto_val = stcb->asoc.initial_rto;
1968			} else {
1969				rto_val = net->RTO;
1970			}
1971			to_ticks = MSEC_TO_TICKS(rto_val);
1972		}
1973		break;
1974	case SCTP_TIMER_TYPE_INIT:
1975		/*
1976		 * Here we use the INIT timer default usually about 1
1977		 * minute.
1978		 */
1979		if ((stcb == NULL) || (net == NULL)) {
1980			return;
1981		}
1982		tmr = &net->rxt_timer;
1983		if (net->RTO == 0) {
1984			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1985		} else {
1986			to_ticks = MSEC_TO_TICKS(net->RTO);
1987		}
1988		break;
1989	case SCTP_TIMER_TYPE_RECV:
1990		/*
1991		 * Here we use the Delayed-Ack timer value from the inp
1992		 * ususually about 200ms.
1993		 */
1994		if (stcb == NULL) {
1995			return;
1996		}
1997		tmr = &stcb->asoc.dack_timer;
1998		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1999		break;
2000	case SCTP_TIMER_TYPE_SHUTDOWN:
2001		/* Here we use the RTO of the destination. */
2002		if ((stcb == NULL) || (net == NULL)) {
2003			return;
2004		}
2005		if (net->RTO == 0) {
2006			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2007		} else {
2008			to_ticks = MSEC_TO_TICKS(net->RTO);
2009		}
2010		tmr = &net->rxt_timer;
2011		break;
2012	case SCTP_TIMER_TYPE_HEARTBEAT:
2013		/*
2014		 * the net is used here so that we can add in the RTO. Even
2015		 * though we use a different timer. We also add the HB timer
2016		 * PLUS a random jitter.
2017		 */
2018		if ((stcb == NULL) || (net == NULL)) {
2019			return;
2020		} else {
2021			uint32_t rndval;
2022			uint32_t jitter;
2023
2024			if ((net->dest_state & SCTP_ADDR_NOHB) &&
2025			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
2026				return;
2027			}
2028			if (net->RTO == 0) {
2029				to_ticks = stcb->asoc.initial_rto;
2030			} else {
2031				to_ticks = net->RTO;
2032			}
2033			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2034			jitter = rndval % to_ticks;
2035			if (jitter >= (to_ticks >> 1)) {
2036				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
2037			} else {
2038				to_ticks = to_ticks - jitter;
2039			}
2040			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2041			    !(net->dest_state & SCTP_ADDR_PF)) {
2042				to_ticks += net->heart_beat_delay;
2043			}
2044			/*
2045			 * Now we must convert the to_ticks that are now in
2046			 * ms to ticks.
2047			 */
2048			to_ticks = MSEC_TO_TICKS(to_ticks);
2049			tmr = &net->hb_timer;
2050		}
2051		break;
2052	case SCTP_TIMER_TYPE_COOKIE:
2053		/*
2054		 * Here we can use the RTO timer from the network since one
2055		 * RTT was compelete. If a retran happened then we will be
2056		 * using the RTO initial value.
2057		 */
2058		if ((stcb == NULL) || (net == NULL)) {
2059			return;
2060		}
2061		if (net->RTO == 0) {
2062			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2063		} else {
2064			to_ticks = MSEC_TO_TICKS(net->RTO);
2065		}
2066		tmr = &net->rxt_timer;
2067		break;
2068	case SCTP_TIMER_TYPE_NEWCOOKIE:
2069		/*
2070		 * nothing needed but the endpoint here ususually about 60
2071		 * minutes.
2072		 */
2073		tmr = &inp->sctp_ep.signature_change;
2074		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2075		break;
2076	case SCTP_TIMER_TYPE_ASOCKILL:
2077		if (stcb == NULL) {
2078			return;
2079		}
2080		tmr = &stcb->asoc.strreset_timer;
2081		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2082		break;
2083	case SCTP_TIMER_TYPE_INPKILL:
2084		/*
2085		 * The inp is setup to die. We re-use the signature_chage
2086		 * timer since that has stopped and we are in the GONE
2087		 * state.
2088		 */
2089		tmr = &inp->sctp_ep.signature_change;
2090		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2091		break;
2092	case SCTP_TIMER_TYPE_PATHMTURAISE:
2093		/*
2094		 * Here we use the value found in the EP for PMTU ususually
2095		 * about 10 minutes.
2096		 */
2097		if ((stcb == NULL) || (net == NULL)) {
2098			return;
2099		}
2100		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2101			return;
2102		}
2103		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2104		tmr = &net->pmtu_timer;
2105		break;
2106	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2107		/* Here we use the RTO of the destination */
2108		if ((stcb == NULL) || (net == NULL)) {
2109			return;
2110		}
2111		if (net->RTO == 0) {
2112			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2113		} else {
2114			to_ticks = MSEC_TO_TICKS(net->RTO);
2115		}
2116		tmr = &net->rxt_timer;
2117		break;
2118	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2119		/*
2120		 * Here we use the endpoints shutdown guard timer usually
2121		 * about 3 minutes.
2122		 */
2123		if (stcb == NULL) {
2124			return;
2125		}
2126		if (inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] == 0) {
2127			to_ticks = 5 * MSEC_TO_TICKS(stcb->asoc.maxrto);
2128		} else {
2129			to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2130		}
2131		tmr = &stcb->asoc.shut_guard_timer;
2132		break;
2133	case SCTP_TIMER_TYPE_STRRESET:
2134		/*
2135		 * Here the timer comes from the stcb but its value is from
2136		 * the net's RTO.
2137		 */
2138		if ((stcb == NULL) || (net == NULL)) {
2139			return;
2140		}
2141		if (net->RTO == 0) {
2142			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2143		} else {
2144			to_ticks = MSEC_TO_TICKS(net->RTO);
2145		}
2146		tmr = &stcb->asoc.strreset_timer;
2147		break;
2148	case SCTP_TIMER_TYPE_ASCONF:
2149		/*
2150		 * Here the timer comes from the stcb but its value is from
2151		 * the net's RTO.
2152		 */
2153		if ((stcb == NULL) || (net == NULL)) {
2154			return;
2155		}
2156		if (net->RTO == 0) {
2157			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2158		} else {
2159			to_ticks = MSEC_TO_TICKS(net->RTO);
2160		}
2161		tmr = &stcb->asoc.asconf_timer;
2162		break;
2163	case SCTP_TIMER_TYPE_PRIM_DELETED:
2164		if ((stcb == NULL) || (net != NULL)) {
2165			return;
2166		}
2167		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168		tmr = &stcb->asoc.delete_prim_timer;
2169		break;
2170	case SCTP_TIMER_TYPE_AUTOCLOSE:
2171		if (stcb == NULL) {
2172			return;
2173		}
2174		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2175			/*
2176			 * Really an error since stcb is NOT set to
2177			 * autoclose
2178			 */
2179			return;
2180		}
2181		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2182		tmr = &stcb->asoc.autoclose_timer;
2183		break;
2184	default:
2185		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2186		    __FUNCTION__, t_type);
2187		return;
2188		break;
2189	}
2190	if ((to_ticks <= 0) || (tmr == NULL)) {
2191		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2192		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2193		return;
2194	}
2195	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2196		/*
2197		 * we do NOT allow you to have it already running. if it is
2198		 * we leave the current one up unchanged
2199		 */
2200		return;
2201	}
2202	/* At this point we can proceed */
2203	if (t_type == SCTP_TIMER_TYPE_SEND) {
2204		stcb->asoc.num_send_timers_up++;
2205	}
2206	tmr->stopped_from = 0;
2207	tmr->type = t_type;
2208	tmr->ep = (void *)inp;
2209	tmr->tcb = (void *)stcb;
2210	tmr->net = (void *)net;
2211	tmr->self = (void *)tmr;
2212	tmr->vnet = (void *)curvnet;
2213	tmr->ticks = sctp_get_tick_count();
2214	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2215	return;
2216}
2217
2218void
2219sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2220    struct sctp_nets *net, uint32_t from)
2221{
2222	struct sctp_timer *tmr;
2223
2224	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2225	    (inp == NULL))
2226		return;
2227
2228	tmr = NULL;
2229	if (stcb) {
2230		SCTP_TCB_LOCK_ASSERT(stcb);
2231	}
2232	switch (t_type) {
2233	case SCTP_TIMER_TYPE_ZERO_COPY:
2234		tmr = &inp->sctp_ep.zero_copy_timer;
2235		break;
2236	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2237		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2238		break;
2239	case SCTP_TIMER_TYPE_ADDR_WQ:
2240		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2241		break;
2242	case SCTP_TIMER_TYPE_SEND:
2243		if ((stcb == NULL) || (net == NULL)) {
2244			return;
2245		}
2246		tmr = &net->rxt_timer;
2247		break;
2248	case SCTP_TIMER_TYPE_INIT:
2249		if ((stcb == NULL) || (net == NULL)) {
2250			return;
2251		}
2252		tmr = &net->rxt_timer;
2253		break;
2254	case SCTP_TIMER_TYPE_RECV:
2255		if (stcb == NULL) {
2256			return;
2257		}
2258		tmr = &stcb->asoc.dack_timer;
2259		break;
2260	case SCTP_TIMER_TYPE_SHUTDOWN:
2261		if ((stcb == NULL) || (net == NULL)) {
2262			return;
2263		}
2264		tmr = &net->rxt_timer;
2265		break;
2266	case SCTP_TIMER_TYPE_HEARTBEAT:
2267		if ((stcb == NULL) || (net == NULL)) {
2268			return;
2269		}
2270		tmr = &net->hb_timer;
2271		break;
2272	case SCTP_TIMER_TYPE_COOKIE:
2273		if ((stcb == NULL) || (net == NULL)) {
2274			return;
2275		}
2276		tmr = &net->rxt_timer;
2277		break;
2278	case SCTP_TIMER_TYPE_NEWCOOKIE:
2279		/* nothing needed but the endpoint here */
2280		tmr = &inp->sctp_ep.signature_change;
2281		/*
2282		 * We re-use the newcookie timer for the INP kill timer. We
2283		 * must assure that we do not kill it by accident.
2284		 */
2285		break;
2286	case SCTP_TIMER_TYPE_ASOCKILL:
2287		/*
2288		 * Stop the asoc kill timer.
2289		 */
2290		if (stcb == NULL) {
2291			return;
2292		}
2293		tmr = &stcb->asoc.strreset_timer;
2294		break;
2295
2296	case SCTP_TIMER_TYPE_INPKILL:
2297		/*
2298		 * The inp is setup to die. We re-use the signature_chage
2299		 * timer since that has stopped and we are in the GONE
2300		 * state.
2301		 */
2302		tmr = &inp->sctp_ep.signature_change;
2303		break;
2304	case SCTP_TIMER_TYPE_PATHMTURAISE:
2305		if ((stcb == NULL) || (net == NULL)) {
2306			return;
2307		}
2308		tmr = &net->pmtu_timer;
2309		break;
2310	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2311		if ((stcb == NULL) || (net == NULL)) {
2312			return;
2313		}
2314		tmr = &net->rxt_timer;
2315		break;
2316	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2317		if (stcb == NULL) {
2318			return;
2319		}
2320		tmr = &stcb->asoc.shut_guard_timer;
2321		break;
2322	case SCTP_TIMER_TYPE_STRRESET:
2323		if (stcb == NULL) {
2324			return;
2325		}
2326		tmr = &stcb->asoc.strreset_timer;
2327		break;
2328	case SCTP_TIMER_TYPE_ASCONF:
2329		if (stcb == NULL) {
2330			return;
2331		}
2332		tmr = &stcb->asoc.asconf_timer;
2333		break;
2334	case SCTP_TIMER_TYPE_PRIM_DELETED:
2335		if (stcb == NULL) {
2336			return;
2337		}
2338		tmr = &stcb->asoc.delete_prim_timer;
2339		break;
2340	case SCTP_TIMER_TYPE_AUTOCLOSE:
2341		if (stcb == NULL) {
2342			return;
2343		}
2344		tmr = &stcb->asoc.autoclose_timer;
2345		break;
2346	default:
2347		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2348		    __FUNCTION__, t_type);
2349		break;
2350	}
2351	if (tmr == NULL) {
2352		return;
2353	}
2354	if ((tmr->type != t_type) && tmr->type) {
2355		/*
2356		 * Ok we have a timer that is under joint use. Cookie timer
2357		 * per chance with the SEND timer. We therefore are NOT
2358		 * running the timer that the caller wants stopped.  So just
2359		 * return.
2360		 */
2361		return;
2362	}
2363	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2364		stcb->asoc.num_send_timers_up--;
2365		if (stcb->asoc.num_send_timers_up < 0) {
2366			stcb->asoc.num_send_timers_up = 0;
2367		}
2368	}
2369	tmr->self = NULL;
2370	tmr->stopped_from = from;
2371	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2372	return;
2373}
2374
2375uint32_t
2376sctp_calculate_len(struct mbuf *m)
2377{
2378	uint32_t tlen = 0;
2379	struct mbuf *at;
2380
2381	at = m;
2382	while (at) {
2383		tlen += SCTP_BUF_LEN(at);
2384		at = SCTP_BUF_NEXT(at);
2385	}
2386	return (tlen);
2387}
2388
2389void
2390sctp_mtu_size_reset(struct sctp_inpcb *inp,
2391    struct sctp_association *asoc, uint32_t mtu)
2392{
2393	/*
2394	 * Reset the P-MTU size on this association, this involves changing
2395	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2396	 * allow the DF flag to be cleared.
2397	 */
2398	struct sctp_tmit_chunk *chk;
2399	unsigned int eff_mtu, ovh;
2400
2401	asoc->smallest_mtu = mtu;
2402	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2403		ovh = SCTP_MIN_OVERHEAD;
2404	} else {
2405		ovh = SCTP_MIN_V4_OVERHEAD;
2406	}
2407	eff_mtu = mtu - ovh;
2408	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2409		if (chk->send_size > eff_mtu) {
2410			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2411		}
2412	}
2413	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2414		if (chk->send_size > eff_mtu) {
2415			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2416		}
2417	}
2418}
2419
2420
2421/*
2422 * given an association and starting time of the current RTT period return
2423 * RTO in number of msecs net should point to the current network
2424 */
2425
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * Given an association and the starting time of the current RTT
	 * period (in *told), return the new RTO in number of msecs.
	 * Side effects: updates net->rtt (in usecs), the smoothed RTT
	 * state (net->lastsa/net->lastsv), the LAN-type classification,
	 * and the association's satellite-network flag.  'safe' selects
	 * whether *told may be read in place or must be copied first
	 * (alignment workaround for sparc64); 'rtt_from_sack' says whether
	 * the measurement came from DATA (SCTP_RTT_FROM_DATA) or from a
	 * non-data exchange such as HB or INIT->INIT-ACK.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since the RTT period started */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = (int32_t) (net->rtt / 1000);
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 *
	 * NOTE: 'rtt' is reused in place below, first as the error term
	 * (measurement - srtt), then as |error| - rttvar; the statement
	 * order is essential.
	 */
	if (net->RTO_measured) {
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt/rttvar from this sample. */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* Never let the variance collapse to zero. */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* Once we drop below the threshold, lock the decision in. */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2541
2542/*
2543 * return a pointer to a contiguous piece of data from the given mbuf chain
2544 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
 * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2547 */
2548caddr_t
2549sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2550{
2551	uint32_t count;
2552	uint8_t *ptr;
2553
2554	ptr = in_ptr;
2555	if ((off < 0) || (len <= 0))
2556		return (NULL);
2557
2558	/* find the desired start location */
2559	while ((m != NULL) && (off > 0)) {
2560		if (off < SCTP_BUF_LEN(m))
2561			break;
2562		off -= SCTP_BUF_LEN(m);
2563		m = SCTP_BUF_NEXT(m);
2564	}
2565	if (m == NULL)
2566		return (NULL);
2567
2568	/* is the current mbuf large enough (eg. contiguous)? */
2569	if ((SCTP_BUF_LEN(m) - off) >= len) {
2570		return (mtod(m, caddr_t)+off);
2571	} else {
2572		/* else, it spans more than one mbuf, so save a temp copy... */
2573		while ((m != NULL) && (len > 0)) {
2574			count = min(SCTP_BUF_LEN(m) - off, len);
2575			bcopy(mtod(m, caddr_t)+off, ptr, count);
2576			len -= count;
2577			ptr += count;
2578			off = 0;
2579			m = SCTP_BUF_NEXT(m);
2580		}
2581		if ((m == NULL) && (len > 0))
2582			return (NULL);
2583		else
2584			return ((caddr_t)in_ptr);
2585	}
2586}
2587
2588
2589
/*
 * Typed front end to sctp_m_getptr(): fetch 'pull_limit' bytes of a
 * parameter starting at 'offset' in chain 'm', copying into 'pull' when
 * the span is not contiguous.  Returns NULL if the chain is too short.
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	/* This just provides a typed signature to Peter's Pull routine */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *) pull));
}
2600
2601
2602struct mbuf *
2603sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2604{
2605	struct mbuf *m_last;
2606	caddr_t dp;
2607
2608	if (padlen > 3) {
2609		return (NULL);
2610	}
2611	if (padlen <= M_TRAILINGSPACE(m)) {
2612		/*
2613		 * The easy way. We hope the majority of the time we hit
2614		 * here :)
2615		 */
2616		m_last = m;
2617	} else {
2618		/* Hard way we must grow the mbuf chain */
2619		m_last = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA);
2620		if (m_last == NULL) {
2621			return (NULL);
2622		}
2623		SCTP_BUF_LEN(m_last) = 0;
2624		SCTP_BUF_NEXT(m_last) = NULL;
2625		SCTP_BUF_NEXT(m) = m_last;
2626	}
2627	dp = mtod(m_last, caddr_t)+SCTP_BUF_LEN(m_last);
2628	SCTP_BUF_LEN(m_last) += padlen;
2629	memset(dp, 0, padlen);
2630	return (m_last);
2631}
2632
2633struct mbuf *
2634sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2635{
2636	/* find the last mbuf in chain and pad it */
2637	struct mbuf *m_at;
2638
2639	if (last_mbuf != NULL) {
2640		return (sctp_add_pad_tombuf(last_mbuf, padval));
2641	} else {
2642		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2643			if (SCTP_BUF_NEXT(m_at) == NULL) {
2644				return (sctp_add_pad_tombuf(m_at, padval));
2645			}
2646		}
2647	}
2648	return (NULL);
2649}
2650
/*
 * Queue an SCTP_ASSOC_CHANGE notification on the socket's receive buffer
 * (if the user enabled SCTP_PCB_FLAGS_RECVASSOCEVNT).  For COMM_UP /
 * RESTART the supported-features list is appended to the notification;
 * for COMM_LOST / CANT_STR_ASSOC the received ABORT chunk (if any) is.
 * For 1-to-1 style sockets an error is additionally set on the socket
 * for COMM_LOST / CANT_STR_ASSOC, and any sleepers are woken.
 */
static void
sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
    uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	size_t notif_len, abort_len;
	unsigned int i;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	if (stcb == NULL) {
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		notif_len = sizeof(struct sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* Only append extras if the full-size allocation succeeded. */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				/* One info byte per supported feature. */
				i = 0;
				if (stcb->asoc.prsctp_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.auth_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.asconf_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.reconfig_supported == 1) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* single-mbuf notification, so tail == head */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				/* Peer aborted before the handshake completed. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				/* Locally generated abort during setup. */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/*
		 * Drop the TCB lock before taking the socket lock
		 * (presumably to respect socket-before-TCB lock ordering;
		 * confirm against the platform locking rules) while a
		 * refcount hold keeps the association alive.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
2801
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state/error on the socket's receive buffer, if the user enabled
 * SCTP_PCB_FLAGS_RECVPADDREVNT.  Silently drops the event on allocation
 * failure.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	memset(spc, 0, sizeof(struct sctp_paddr_change));
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, in the form the user asked for. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
#ifdef INET6
		/* Report v4 addresses as v4-mapped v6 if requested. */
		if (sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
			in6_sin_2_v4mapsin6((struct sockaddr_in *)sa,
			    (struct sockaddr_in6 *)&spc->spc_aaddr);
		} else {
			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		}
#else
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
#endif
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* single-mbuf notification, so tail == head */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2893
2894
/*
 * Queue a send-failed notification for a chunk that was (sent != 0) or
 * was not (sent == 0) put on the wire.  Emits the newer
 * SCTP_SEND_FAILED_EVENT or the older SCTP_SEND_FAILED format depending
 * on which feature the user enabled.  On success the chunk's data mbuf
 * chain is stolen (chk->data set to NULL) and handed to the reader.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		/*
		 * Reported length counts the user payload, not the DATA
		 * chunk header (which is trimmed off the mbuf below).
		 */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		/* See note above: payload length without the chunk header. */
		length += chk->send_size;
		length -= sizeof(struct sctp_data_chunk);
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3008
3009
/*
 * Queue a send-failed notification for a stream-queue-pending message
 * (one that never became a chunk, hence always SCTP_DATA_UNSENT).  Like
 * sctp_notify_send_failed(), emits the newer SCTP_SEND_FAILED_EVENT or
 * the older SCTP_SEND_FAILED format depending on the enabled feature,
 * and steals sp->data on success.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		/* Reported length counts the queued (unsent) user data. */
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* Part of the message was already chunked off. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3110
3111
3112
3113static void
3114sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3115{
3116	struct mbuf *m_notify;
3117	struct sctp_adaptation_event *sai;
3118	struct sctp_queued_to_read *control;
3119
3120	if ((stcb == NULL) ||
3121	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3122		/* event not enabled */
3123		return;
3124	}
3125	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
3126	if (m_notify == NULL)
3127		/* no space left */
3128		return;
3129	SCTP_BUF_LEN(m_notify) = 0;
3130	sai = mtod(m_notify, struct sctp_adaptation_event *);
3131	memset(sai, 0, sizeof(struct sctp_adaptation_event));
3132	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3133	sai->sai_flags = 0;
3134	sai->sai_length = sizeof(struct sctp_adaptation_event);
3135	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3136	sai->sai_assoc_id = sctp_get_associd(stcb);
3137
3138	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3139	SCTP_BUF_NEXT(m_notify) = NULL;
3140
3141	/* append to socket */
3142	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3143	    0, 0, stcb->asoc.context, 0, 0, 0,
3144	    m_notify);
3145	if (control == NULL) {
3146		/* no memory */
3147		sctp_m_freem(m_notify);
3148		return;
3149	}
3150	control->length = SCTP_BUF_LEN(m_notify);
3151	control->spec_flags = M_NOTIFICATION;
3152	/* not that we need this */
3153	control->tail_mbuf = m_notify;
3154	sctp_add_to_readq(stcb->sctp_ep, stcb,
3155	    control,
3156	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3157}
3158
/*
 * Deliver an SCTP_PARTIAL_DELIVERY_EVENT notification.  'val' packs the
 * stream id in the upper 16 bits and the sequence number in the lower
 * 16 bits.  Unlike the other notification builders, this one inserts
 * the entry directly into the endpoint's read queue (right after the
 * in-progress partial-delivery entry, if any) and does its own socket
 * buffer accounting instead of going through sctp_add_to_readq().
 *
 * This always must be called with the read-queue LOCKED in the INP.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* The socket can no longer be read from; drop the event. */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs (stream << 16) | seq; unpack it for the user. */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	/* Reset length; it is re-accounted below via atomic_add_int(). */
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* Charge the mbuf against the socket buffer ourselves. */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* Keep the event ordered right behind the partial-delivery message. */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Take the socket lock, dropping/reacquiring the TCB lock. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3254
/*
 * Handle the local side effects of a completed SHUTDOWN: for 1-to-1
 * style (TCP model) sockets mark the socket as unable to send more,
 * then, if the user subscribed to SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT,
 * queue an SCTP_SHUTDOWN_EVENT notification on the receive buffer.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* Take the socket lock, dropping/reacquiring the TCB lock. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3323
3324static void
3325sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3326    int so_locked
3327#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3328    SCTP_UNUSED
3329#endif
3330)
3331{
3332	struct mbuf *m_notify;
3333	struct sctp_sender_dry_event *event;
3334	struct sctp_queued_to_read *control;
3335
3336	if ((stcb == NULL) ||
3337	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3338		/* event not enabled */
3339		return;
3340	}
3341	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
3342	if (m_notify == NULL) {
3343		/* no space left */
3344		return;
3345	}
3346	SCTP_BUF_LEN(m_notify) = 0;
3347	event = mtod(m_notify, struct sctp_sender_dry_event *);
3348	memset(event, 0, sizeof(struct sctp_sender_dry_event));
3349	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3350	event->sender_dry_flags = 0;
3351	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3352	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3353
3354	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3355	SCTP_BUF_NEXT(m_notify) = NULL;
3356
3357	/* append to socket */
3358	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3359	    0, 0, stcb->asoc.context, 0, 0, 0,
3360	    m_notify);
3361	if (control == NULL) {
3362		/* no memory */
3363		sctp_m_freem(m_notify);
3364		return;
3365	}
3366	control->length = SCTP_BUF_LEN(m_notify);
3367	control->spec_flags = M_NOTIFICATION;
3368	/* not that we need this */
3369	control->tail_mbuf = m_notify;
3370	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3371	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3372}
3373
3374
3375void
3376sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3377{
3378	struct mbuf *m_notify;
3379	struct sctp_queued_to_read *control;
3380	struct sctp_stream_change_event *stradd;
3381
3382	if ((stcb == NULL) ||
3383	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3384		/* event not enabled */
3385		return;
3386	}
3387	if ((stcb->asoc.peer_req_out) && flag) {
3388		/* Peer made the request, don't tell the local user */
3389		stcb->asoc.peer_req_out = 0;
3390		return;
3391	}
3392	stcb->asoc.peer_req_out = 0;
3393	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
3394	if (m_notify == NULL)
3395		/* no space left */
3396		return;
3397	SCTP_BUF_LEN(m_notify) = 0;
3398	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3399	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
3400	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3401	stradd->strchange_flags = flag;
3402	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
3403	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3404	stradd->strchange_instrms = numberin;
3405	stradd->strchange_outstrms = numberout;
3406	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
3407	SCTP_BUF_NEXT(m_notify) = NULL;
3408	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3409		/* no space */
3410		sctp_m_freem(m_notify);
3411		return;
3412	}
3413	/* append to socket */
3414	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3415	    0, 0, stcb->asoc.context, 0, 0, 0,
3416	    m_notify);
3417	if (control == NULL) {
3418		/* no memory */
3419		sctp_m_freem(m_notify);
3420		return;
3421	}
3422	control->spec_flags = M_NOTIFICATION;
3423	control->length = SCTP_BUF_LEN(m_notify);
3424	/* not that we need this */
3425	control->tail_mbuf = m_notify;
3426	sctp_add_to_readq(stcb->sctp_ep, stcb,
3427	    control,
3428	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3429}
3430
3431void
3432sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3433{
3434	struct mbuf *m_notify;
3435	struct sctp_queued_to_read *control;
3436	struct sctp_assoc_reset_event *strasoc;
3437
3438	if ((stcb == NULL) ||
3439	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3440		/* event not enabled */
3441		return;
3442	}
3443	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
3444	if (m_notify == NULL)
3445		/* no space left */
3446		return;
3447	SCTP_BUF_LEN(m_notify) = 0;
3448	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3449	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
3450	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3451	strasoc->assocreset_flags = flag;
3452	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
3453	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3454	strasoc->assocreset_local_tsn = sending_tsn;
3455	strasoc->assocreset_remote_tsn = recv_tsn;
3456	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
3457	SCTP_BUF_NEXT(m_notify) = NULL;
3458	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3459		/* no space */
3460		sctp_m_freem(m_notify);
3461		return;
3462	}
3463	/* append to socket */
3464	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3465	    0, 0, stcb->asoc.context, 0, 0, 0,
3466	    m_notify);
3467	if (control == NULL) {
3468		/* no memory */
3469		sctp_m_freem(m_notify);
3470		return;
3471	}
3472	control->spec_flags = M_NOTIFICATION;
3473	control->length = SCTP_BUF_LEN(m_notify);
3474	/* not that we need this */
3475	control->tail_mbuf = m_notify;
3476	sctp_add_to_readq(stcb->sctp_ep, stcb,
3477	    control,
3478	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3479}
3480
3481
3482
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the streams
 * affected by an incoming/outgoing stream reset.  'list' holds
 * 'number_entries' stream ids in network byte order (converted to host
 * order for the user); 'flag' carries the SCTP_STREAM_RESET_* flags.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	/* Allocate a cluster; the stream list length is only known below. */
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	memset(strreset, 0, len);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* Stream ids arrive in network order; report in host order. */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3545
3546
/*
 * Queue an SCTP_REMOTE_ERROR notification for an Operation Error chunk
 * received from the peer.  When 'chunk' is non-NULL its full contents
 * (header included) are copied behind the notification header; if the
 * large allocation fails, fall back to delivering the notification
 * without the chunk data.
 */
static void
sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
{
	struct mbuf *m_notify;
	struct sctp_remote_error *sre;
	struct sctp_queued_to_read *control;
	size_t notif_len, chunk_len;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
		/* event not enabled */
		return;
	}
	if (chunk != NULL) {
		chunk_len = ntohs(chunk->ch.chunk_length);
	} else {
		chunk_len = 0;
	}
	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* Retry with smaller value. */
		notif_len = sizeof(struct sctp_remote_error);
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			return;
		}
	}
	SCTP_BUF_NEXT(m_notify) = NULL;
	sre = mtod(m_notify, struct sctp_remote_error *);
	memset(sre, 0, notif_len);
	sre->sre_type = SCTP_REMOTE_ERROR;
	sre->sre_flags = 0;
	sre->sre_length = sizeof(struct sctp_remote_error);
	sre->sre_error = error;
	sre->sre_assoc_id = sctp_get_associd(stcb);
	/* Only copy the chunk if the large (header + chunk) mbuf fit. */
	if (notif_len > sizeof(struct sctp_remote_error)) {
		memcpy(sre->sre_data, chunk, chunk_len);
		sre->sre_length += chunk_len;
	}
	SCTP_BUF_LEN(m_notify) = sre->sre_length;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control != NULL) {
		control->length = SCTP_BUF_LEN(m_notify);
		/* not that we need this */
		control->tail_mbuf = m_notify;
		control->spec_flags = M_NOTIFICATION;
		sctp_add_to_readq(stcb->sctp_ep, stcb,
		    control,
		    &stcb->sctp_socket->so_rcv, 1,
		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
	} else {
		/* No read-queue entry; drop the notification. */
		sctp_m_freem(m_notify);
	}
}
3603
3604
/*
 * Central dispatcher for upper-layer (socket) notifications: map the
 * internal SCTP_NOTIFY_* code to the matching builder function.
 * 'data' is interpreted per notification type (a net, a chunk, a
 * pending send, a stream-id list, a key id, ...).  Notifications are
 * silently dropped when the socket is gone, can no longer receive, or
 * (for interface events) the association is still handshaking.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* Receive side already shut down; nothing to deliver to. */
		return;
	}
	if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is only reported once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error, so_locked);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* Message failed while still on the stream (pending) queue. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* val packs (stream << 16) | seq for the PDAPI event. */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* In front states report CANT_STR_ASSOC instead of COMM_LOST. */
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.auth_supported == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* For the stream-reset cases, 'error' is the entry count. */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error, so_locked);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* For the auth cases, 'data' carries the key id by value. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3784
/*
 * Drain every outbound queue of the association (sent queue, send
 * queue, and each stream's pending queue), notifying the user about
 * each unsent/failed message and freeing its chunks and data.  Used
 * when the association is being torn down (e.g. on abort).
 * 'holds_lock' tells us whether the caller already holds the TCB send
 * lock; if not, it is taken around the queue walk.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Nobody left to notify; skip the work. */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		/* NR-acked chunks were already taken off the stream counts. */
		if (chk->sent != SCTP_DATAGRAM_NR_ACKED) {
			if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
				asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
			} else {
				panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
			}
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* The notify may have stolen chk->data; re-check. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
			asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
		} else {
			panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
		}
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* The notify may have stolen chk->data; re-check. */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* The notify may have stolen sp->data; re-check. */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3896
/*
 * Tell the user the association has been aborted: report all unsent
 * outbound data as failed, then raise the appropriate ABORTED
 * notification.  'from_peer' selects between a remotely and locally
 * generated abort; 'abort' optionally carries the received/sent ABORT
 * chunk for inclusion in the notification.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/* Remember the abort for 1-to-1 style (connected) sockets. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Socket is gone; nobody to notify. */
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3926
/*
 * Abort an association in response to an inbound packet: notify the
 * user (if a TCB exists), send an ABORT chunk back to the peer using
 * the peer's vtag when known, and free the association.  'op_err'
 * optionally carries error causes to include in the ABORT.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    mflowtype, mflowid, inp->fibnum,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Take the socket lock, dropping/reacquiring the TCB lock. */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Only established associations count against currestab. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3976
3977#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN track logs kept in the
 * association (SCTP_ASOCLOG_OF_TSNS builds only).  Each log is a circular
 * buffer: when it has wrapped, the entries from the current index to the
 * end are printed first (the oldest), then the entries before the index.
 *
 * NOTE(review): the gating macro below is spelled "NOSIY_PRINTS", which
 * looks like a misspelling of "NOISY_PRINTS".  Left as-is because the
 * macro name is part of the build interface -- confirm before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped portion first: entries from tsn_in_at to the end are oldest */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
4038
4039#endif
4040
/*
 * Locally abort an existing association: notify the ULP (unless the socket
 * is already gone), send an ABORT chunk to the peer, and free the
 * association.  When stcb is NULL and the socket is gone with no remaining
 * associations, the endpoint itself is freed instead.  so_locked tells the
 * __APPLE__/SCTP_SO_LOCK_TESTING path whether the caller already holds the
 * socket lock.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/* endpoint with no assocs left: release it now */
			if (LIST_EMPTY(&inp->sctp_asoc_list)) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* only established assocs count toward sctps_currestab */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Need the socket lock for sctp_free_assoc(); drop the TCB lock
	 * while acquiring it, holding a refcount so the assoc survives.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
4102
/*
 * Handle an "out of the blue" packet (one that matches no association).
 * Walks the chunk list to decide whether a response is appropriate: per
 * RFC rules we never respond to ABORT, SHUTDOWN COMPLETE, or
 * packet-dropped reports; a stray SHUTDOWN ACK gets a SHUTDOWN COMPLETE;
 * anything else may draw an ABORT, subject to the sctp_blackhole sysctl
 * (1 suppresses responses to packets containing an INIT, 2 suppresses all).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct mbuf *cause,
    uint8_t mflowtype, uint32_t mflowid, uint16_t fibnum,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* endpoint is being torn down; free it if nothing remains */
		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered for the blackhole decision below */
			contains_init_chunk = 1;
			break;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    mflowtype, mflowid, fibnum,
			    vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole sysctl: 0 = always abort, 1 = unless INIT, 2 = never */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, cause,
		    mflowtype, mflowid, fibnum,
		    vrf_id, port);
	}
}
4168
4169/*
4170 * check the inbound datagram to make sure there is not an abort inside it,
4171 * if there is return 1, else return 0.
4172 */
4173int
4174sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4175{
4176	struct sctp_chunkhdr *ch;
4177	struct sctp_init_chunk *init_chk, chunk_buf;
4178	int offset;
4179	unsigned int chk_length;
4180
4181	offset = iphlen + sizeof(struct sctphdr);
4182	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4183	    (uint8_t *) & chunk_buf);
4184	while (ch != NULL) {
4185		chk_length = ntohs(ch->chunk_length);
4186		if (chk_length < sizeof(*ch)) {
4187			/* packet is probably corrupt */
4188			break;
4189		}
4190		/* we seem to be ok, is it an abort? */
4191		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4192			/* yep, tell them */
4193			return (1);
4194		}
4195		if (ch->chunk_type == SCTP_INITIATION) {
4196			/* need to update the Vtag */
4197			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4198			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4199			if (init_chk != NULL) {
4200				*vtagfill = ntohl(init_chk->init.initiate_tag);
4201			}
4202		}
4203		/* Nope, move to the next chunk */
4204		offset += SCTP_SIZE32(chk_length);
4205		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4206		    sizeof(*ch), (uint8_t *) & chunk_buf);
4207	}
4208	return (0);
4209}
4210
4211/*
4212 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4213 * set (i.e. it's 0) so, create this function to compare link local scopes
4214 */
4215#ifdef INET6
4216uint32_t
4217sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4218{
4219	struct sockaddr_in6 a, b;
4220
4221	/* save copies */
4222	a = *addr1;
4223	b = *addr2;
4224
4225	if (a.sin6_scope_id == 0)
4226		if (sa6_recoverscope(&a)) {
4227			/* can't get scope, so can't match */
4228			return (0);
4229		}
4230	if (b.sin6_scope_id == 0)
4231		if (sa6_recoverscope(&b)) {
4232			/* can't get scope, so can't match */
4233			return (0);
4234		}
4235	if (a.sin6_scope_id != b.sin6_scope_id)
4236		return (0);
4237
4238	return (1);
4239}
4240
4241/*
4242 * returns a sockaddr_in6 with embedded scope recovered and removed
4243 */
4244struct sockaddr_in6 *
4245sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4246{
4247	/* check and strip embedded scope junk */
4248	if (addr->sin6_family == AF_INET6) {
4249		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4250			if (addr->sin6_scope_id == 0) {
4251				*store = *addr;
4252				if (!sa6_recoverscope(store)) {
4253					/* use the recovered scope */
4254					addr = store;
4255				}
4256			} else {
4257				/* else, return the original "to" addr */
4258				in6_clearscope(&addr->sin6_addr);
4259			}
4260		}
4261	}
4262	return (addr);
4263}
4264
4265#endif
4266
4267/*
4268 * are the two addresses the same?  currently a "scopeless" check returns: 1
4269 * if same, 0 if not
4270 */
4271int
4272sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4273{
4274
4275	/* must be valid */
4276	if (sa1 == NULL || sa2 == NULL)
4277		return (0);
4278
4279	/* must be the same family */
4280	if (sa1->sa_family != sa2->sa_family)
4281		return (0);
4282
4283	switch (sa1->sa_family) {
4284#ifdef INET6
4285	case AF_INET6:
4286		{
4287			/* IPv6 addresses */
4288			struct sockaddr_in6 *sin6_1, *sin6_2;
4289
4290			sin6_1 = (struct sockaddr_in6 *)sa1;
4291			sin6_2 = (struct sockaddr_in6 *)sa2;
4292			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4293			    sin6_2));
4294		}
4295#endif
4296#ifdef INET
4297	case AF_INET:
4298		{
4299			/* IPv4 addresses */
4300			struct sockaddr_in *sin_1, *sin_2;
4301
4302			sin_1 = (struct sockaddr_in *)sa1;
4303			sin_2 = (struct sockaddr_in *)sa2;
4304			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4305		}
4306#endif
4307	default:
4308		/* we don't do these... */
4309		return (0);
4310	}
4311}
4312
4313void
4314sctp_print_address(struct sockaddr *sa)
4315{
4316#ifdef INET6
4317	char ip6buf[INET6_ADDRSTRLEN];
4318
4319#endif
4320
4321	switch (sa->sa_family) {
4322#ifdef INET6
4323	case AF_INET6:
4324		{
4325			struct sockaddr_in6 *sin6;
4326
4327			sin6 = (struct sockaddr_in6 *)sa;
4328			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4329			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4330			    ntohs(sin6->sin6_port),
4331			    sin6->sin6_scope_id);
4332			break;
4333		}
4334#endif
4335#ifdef INET
4336	case AF_INET:
4337		{
4338			struct sockaddr_in *sin;
4339			unsigned char *p;
4340
4341			sin = (struct sockaddr_in *)sa;
4342			p = (unsigned char *)&sin->sin_addr;
4343			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4344			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4345			break;
4346		}
4347#endif
4348	default:
4349		SCTP_PRINTF("?\n");
4350		break;
4351	}
4352}
4353
/*
 * Move every queued-to-read control structure belonging to stcb from
 * old_inp's read queue to new_inp's (used on peeloff/accept).  Socket
 * buffer accounting is transferred along with the data: bytes are freed
 * from old_so's receive buffer and charged to new_so's.  waitflags is
 * passed through to sblock(); on sblock failure the data is left where
 * it is.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release the sb accounting on the old socket */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the bytes to the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4429
/*
 * Append a queued-to-read control structure (and its mbuf chain) to the
 * endpoint's read queue, charging the bytes to sb so select/poll see the
 * data.  Zero-length mbufs are pruned from the chain; if everything prunes
 * away the control is freed instead of queued.  end marks the message
 * complete (end_added).  inp_read_lock_held tells us whether the caller
 * already holds the INP read lock; so_locked drives the socket-lock dance
 * on the __APPLE__/SCTP_SO_LOCK_TESTING path.  Finishes by waking any
 * reader (or firing the zero-copy event when that feature is on).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read it anymore; drop the data and control */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: prune empty mbufs, account the rest into sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, let the reader know something arrived */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * take the socket lock for the wakeup, dropping
			 * the TCB lock (with a refcount) while doing so
			 */
			if (!so_locked) {
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4555
4556
/*
 * Append mbuf chain m to an existing read-queue control (partial delivery
 * or reassembly).  Zero-length mbufs are pruned; the surviving bytes are
 * added to control->length and, when sb is non-NULL, charged to the socket
 * buffer.  end completes the message (clearing any pd-api linkage);
 * ctls_cumack is recorded as the control's sinfo_tsn/sinfo_cumtsn.
 * Returns 0 on success, -1 when the control is missing/complete or there
 * is nothing to append.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune empty mbufs and account the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake any reader (or deliver the zero-copy event) */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* take socket lock; drop TCB lock with a refcount held */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4706
4707
4708
4709/*************HOLD THIS COMMENT FOR PATCH FILE OF
4710 *************ALTERNATE ROUTING CODE
4711 */
4712
4713/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4714 *************ALTERNATE ROUTING CODE
4715 */
4716
4717struct mbuf *
4718sctp_generate_cause(uint16_t code, char *info)
4719{
4720	struct mbuf *m;
4721	struct sctp_gen_error_cause *cause;
4722	size_t info_len, len;
4723
4724	if ((code == 0) || (info == NULL)) {
4725		return (NULL);
4726	}
4727	info_len = strlen(info);
4728	len = sizeof(struct sctp_paramhdr) + info_len;
4729	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4730	if (m != NULL) {
4731		SCTP_BUF_LEN(m) = len;
4732		cause = mtod(m, struct sctp_gen_error_cause *);
4733		cause->code = htons(code);
4734		cause->length = htons((uint16_t) len);
4735		memcpy(cause->info, info, info_len);
4736	}
4737	return (m);
4738}
4739
4740struct mbuf *
4741sctp_generate_no_user_data_cause(uint32_t tsn)
4742{
4743	struct mbuf *m;
4744	struct sctp_error_no_user_data *no_user_data_cause;
4745	size_t len;
4746
4747	len = sizeof(struct sctp_error_no_user_data);
4748	m = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
4749	if (m != NULL) {
4750		SCTP_BUF_LEN(m) = len;
4751		no_user_data_cause = mtod(m, struct sctp_error_no_user_data *);
4752		no_user_data_cause->cause.code = htons(SCTP_CAUSE_NO_USER_DATA);
4753		no_user_data_cause->cause.length = htons((uint16_t) len);
4754		no_user_data_cause->tsn = tsn;	/* tsn is passed in as NBO */
4755	}
4756	return (m);
4757}
4758
4759#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the send-buffer accounting held by chunk tp1 (MBCNT logging
 * build): decrement the association's outstanding chunk count and output
 * queue size, and, for TCP-model sockets, give the bytes back to the
 * socket's send buffer.  Both subtractions clamp at zero rather than
 * underflowing.  No-op when the chunk has no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero to avoid wrapping the queue-size counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also carry the bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4791
4792#endif
4793
/*
 * Abandon a PR-SCTP message starting at chunk tp1: release all fragments
 * of the message (on the sent queue, the send queue, and, if the message
 * is still incomplete, the stream-out queue), notify the ULP of the
 * sent/unsent failure, and mark the fragments SCTP_FORWARD_TSN_SKIP so a
 * FORWARD-TSN can cover them.  sent distinguishes already-transmitted
 * from never-transmitted data for statistics and notifications.
 * Returns the number of booked bytes released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* book the abandonment in the per-policy statistics */
	if (sent || !(tp1->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
		stcb->asoc.abandoned_sent[0]++;
		stcb->asoc.abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[stream].abandoned_sent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[stream].abandoned_sent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	} else {
		stcb->asoc.abandoned_unsent[0]++;
		stcb->asoc.abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
		stcb->asoc.strmout[stream].abandoned_unsent[0]++;
#if defined(SCTP_DETAILED_STR_STATS)
		stcb->asoc.strmout[stream].abandoned_unsent[PR_SCTP_POLICY(tp1->flags)]++;
#endif
	}
	/* release tp1 and any following fragments of the same message */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.stream_number != stream) ||
			    (tp1->rec.data.stream_seq != seq)) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		SCTP_TCB_SEND_LOCK(stcb);
		strq = &stcb->asoc.strmout[stream];
		sp = TAILQ_FIRST(&strq->outqueue);
		if (sp != NULL) {
			sp->discard_rest = 1;
			/*
			 * We may need to put a chunk on the queue that
			 * holds the TSN that would have been sent with the
			 * LAST bit.
			 */
			if (chk == NULL) {
				/* Yep, we have to */
				sctp_alloc_a_chunk(stcb, chk);
				if (chk == NULL) {
					/*
					 * we are hosed. All we can do is
					 * nothing.. which will cause an
					 * abort if the peer is paying
					 * attention.
					 */
					goto oh_well;
				}
				memset(chk, 0, sizeof(*chk));
				chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
				chk->sent = SCTP_FORWARD_TSN_SKIP;
				chk->asoc = &stcb->asoc;
				chk->rec.data.stream_seq = strq->next_sequence_send;
				chk->rec.data.stream_number = sp->stream;
				chk->rec.data.payloadtype = sp->ppid;
				chk->rec.data.context = sp->context;
				chk->flags = sp->act_flags;
				chk->whoTo = NULL;
				chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
				strq->chunks_on_queues++;
				TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
				stcb->asoc.sent_queue_cnt++;
				stcb->asoc.pr_sctp_cnt++;
			} else {
				chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
			}
			strq->next_sequence_send++;
	oh_well:
			if (sp->data) {
				/*
				 * Pull any data to free up the SB and allow
				 * sender to "add more" while we will throw
				 * away :-)
				 */
				sctp_free_spbufspace(stcb, &stcb->asoc, sp);
				ret_sz += sp->length;
				do_wakeup_routine = 1;
				sp->some_taken = 1;
				sctp_m_freem(sp->data);
				sp->data = NULL;
				sp->tail_mbuf = NULL;
				sp->length = 0;
			}
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/* take socket lock for the wakeup; refcount guards the assoc */
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
5008
5009/*
5010 * checks to see if the given address, sa, is one that is currently known by
5011 * the kernel note: can't distinguish the same address on multiple interfaces
5012 * and doesn't handle multiple addresses with different zone/scope id's note:
5013 * ifa_ifwithaddr() compares the entire sockaddr struct
5014 */
5015struct sctp_ifa *
5016sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
5017    int holds_lock)
5018{
5019	struct sctp_laddr *laddr;
5020
5021	if (holds_lock == 0) {
5022		SCTP_INP_RLOCK(inp);
5023	}
5024	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
5025		if (laddr->ifa == NULL)
5026			continue;
5027		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
5028			continue;
5029#ifdef INET
5030		if (addr->sa_family == AF_INET) {
5031			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5032			    laddr->ifa->address.sin.sin_addr.s_addr) {
5033				/* found him. */
5034				if (holds_lock == 0) {
5035					SCTP_INP_RUNLOCK(inp);
5036				}
5037				return (laddr->ifa);
5038				break;
5039			}
5040		}
5041#endif
5042#ifdef INET6
5043		if (addr->sa_family == AF_INET6) {
5044			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5045			    &laddr->ifa->address.sin6)) {
5046				/* found him. */
5047				if (holds_lock == 0) {
5048					SCTP_INP_RUNLOCK(inp);
5049				}
5050				return (laddr->ifa);
5051				break;
5052			}
5053		}
5054#endif
5055	}
5056	if (holds_lock == 0) {
5057		SCTP_INP_RUNLOCK(inp);
5058	}
5059	return (NULL);
5060}
5061
/*
 * Compute a 32-bit hash value for the given address, used to index the
 * VRF address hash table.  IPv4 folds the 32-bit address with its own
 * upper half; IPv6 sums the four 32-bit words and folds likewise.
 * Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
#ifdef INET
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *sin;
		uint32_t v4;

		sin = (struct sockaddr_in *)addr;
		v4 = sin->sin_addr.s_addr;
		/* Fold the top 16 bits down into the bottom 16. */
		return (v4 ^ (v4 >> 16));
	}
#endif
#ifdef INET6
	if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;
		uint32_t sum;

		sin6 = (struct sockaddr_in6 *)addr;
		/* Sum all four 32-bit words of the IPv6 address. */
		sum = sin6->sin6_addr.s6_addr32[0] +
		    sin6->sin6_addr.s6_addr32[1] +
		    sin6->sin6_addr.s6_addr32[2] +
		    sin6->sin6_addr.s6_addr32[3];
		return (sum ^ (sum >> 16));
	}
#endif
	/* Unsupported address family. */
	return (0);
}
5095
5096struct sctp_ifa *
5097sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5098{
5099	struct sctp_ifa *sctp_ifap;
5100	struct sctp_vrf *vrf;
5101	struct sctp_ifalist *hash_head;
5102	uint32_t hash_of_addr;
5103
5104	if (holds_lock == 0)
5105		SCTP_IPI_ADDR_RLOCK();
5106
5107	vrf = sctp_find_vrf(vrf_id);
5108	if (vrf == NULL) {
5109		if (holds_lock == 0)
5110			SCTP_IPI_ADDR_RUNLOCK();
5111		return (NULL);
5112	}
5113	hash_of_addr = sctp_get_ifa_hash_val(addr);
5114
5115	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5116	if (hash_head == NULL) {
5117		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5118		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5119		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5120		sctp_print_address(addr);
5121		SCTP_PRINTF("No such bucket for address\n");
5122		if (holds_lock == 0)
5123			SCTP_IPI_ADDR_RUNLOCK();
5124
5125		return (NULL);
5126	}
5127	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5128		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5129			continue;
5130#ifdef INET
5131		if (addr->sa_family == AF_INET) {
5132			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5133			    sctp_ifap->address.sin.sin_addr.s_addr) {
5134				/* found him. */
5135				if (holds_lock == 0)
5136					SCTP_IPI_ADDR_RUNLOCK();
5137				return (sctp_ifap);
5138				break;
5139			}
5140		}
5141#endif
5142#ifdef INET6
5143		if (addr->sa_family == AF_INET6) {
5144			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5145			    &sctp_ifap->address.sin6)) {
5146				/* found him. */
5147				if (holds_lock == 0)
5148					SCTP_IPI_ADDR_RUNLOCK();
5149				return (sctp_ifap);
5150				break;
5151			}
5152		}
5153#endif
5154	}
5155	if (holds_lock == 0)
5156		SCTP_IPI_ADDR_RUNLOCK();
5157	return (NULL);
5158}
5159
/*
 * Called from the socket-receive path after the user has pulled data off
 * the socket: decide whether the receive window has opened enough to be
 * worth telling the peer via a window-update SACK.
 *
 * stcb         - association; NULL means nothing to do.
 * freed_so_far - in/out: bytes freed since the last update.  Folded into
 *                stcb->freed_by_sorcv_sincelast and zeroed here.
 * hold_rlock   - non-zero if the caller holds the inp read-queue lock;
 *                it is dropped here before taking the TCB lock and
 *                reacquired on the way out (lock-order discipline).
 * rwnd_req     - threshold (bytes) of window growth that justifies an
 *                immediate SACK.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed underneath us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the per-tcb counter. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window has opened enough to report to the peer. */
		if (hold_rlock) {
			/* Drop the read-queue lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		/* Send the window-update SACK and push out any queued data. */
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
		    SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Reacquire the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5242
5243int
5244sctp_sorecvmsg(struct socket *so,
5245    struct uio *uio,
5246    struct mbuf **mp,
5247    struct sockaddr *from,
5248    int fromlen,
5249    int *msg_flags,
5250    struct sctp_sndrcvinfo *sinfo,
5251    int filling_sinfo)
5252{
5253	/*
5254	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5255	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5256	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5257	 * On the way out we may send out any combination of:
5258	 * MSG_NOTIFICATION MSG_EOR
5259	 *
5260	 */
5261	struct sctp_inpcb *inp = NULL;
5262	int my_len = 0;
5263	int cp_len = 0, error = 0;
5264	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5265	struct mbuf *m = NULL;
5266	struct sctp_tcb *stcb = NULL;
5267	int wakeup_read_socket = 0;
5268	int freecnt_applied = 0;
5269	int out_flags = 0, in_flags = 0;
5270	int block_allowed = 1;
5271	uint32_t freed_so_far = 0;
5272	uint32_t copied_so_far = 0;
5273	int in_eeor_mode = 0;
5274	int no_rcv_needed = 0;
5275	uint32_t rwnd_req = 0;
5276	int hold_sblock = 0;
5277	int hold_rlock = 0;
5278	int slen = 0;
5279	uint32_t held_length = 0;
5280	int sockbuf_lock = 0;
5281
5282	if (uio == NULL) {
5283		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5284		return (EINVAL);
5285	}
5286	if (msg_flags) {
5287		in_flags = *msg_flags;
5288		if (in_flags & MSG_PEEK)
5289			SCTP_STAT_INCR(sctps_read_peeks);
5290	} else {
5291		in_flags = 0;
5292	}
5293	slen = uio->uio_resid;
5294
5295	/* Pull in and set up our int flags */
5296	if (in_flags & MSG_OOB) {
5297		/* Out of band's NOT supported */
5298		return (EOPNOTSUPP);
5299	}
5300	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5301		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5302		return (EINVAL);
5303	}
5304	if ((in_flags & (MSG_DONTWAIT
5305	    | MSG_NBIO
5306	    )) ||
5307	    SCTP_SO_IS_NBIO(so)) {
5308		block_allowed = 0;
5309	}
5310	/* setup the endpoint */
5311	inp = (struct sctp_inpcb *)so->so_pcb;
5312	if (inp == NULL) {
5313		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5314		return (EFAULT);
5315	}
5316	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5317	/* Must be at least a MTU's worth */
5318	if (rwnd_req < SCTP_MIN_RWND)
5319		rwnd_req = SCTP_MIN_RWND;
5320	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5321	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5322		sctp_misc_ints(SCTP_SORECV_ENTER,
5323		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5324	}
5325	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5326		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5327		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5328	}
5329	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5330	if (error) {
5331		goto release_unlocked;
5332	}
5333	sockbuf_lock = 1;
5334restart:
5335
5336
5337restart_nosblocks:
5338	if (hold_sblock == 0) {
5339		SOCKBUF_LOCK(&so->so_rcv);
5340		hold_sblock = 1;
5341	}
5342	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5343	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5344		goto out;
5345	}
5346	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5347		if (so->so_error) {
5348			error = so->so_error;
5349			if ((in_flags & MSG_PEEK) == 0)
5350				so->so_error = 0;
5351			goto out;
5352		} else {
5353			if (so->so_rcv.sb_cc == 0) {
5354				/* indicate EOF */
5355				error = 0;
5356				goto out;
5357			}
5358		}
5359	}
5360	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5361		/* we need to wait for data */
5362		if ((so->so_rcv.sb_cc == 0) &&
5363		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5364		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5365			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5366				/*
5367				 * For active open side clear flags for
5368				 * re-use passive open is blocked by
5369				 * connect.
5370				 */
5371				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5372					/*
5373					 * You were aborted, passive side
5374					 * always hits here
5375					 */
5376					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5377					error = ECONNRESET;
5378				}
5379				so->so_state &= ~(SS_ISCONNECTING |
5380				    SS_ISDISCONNECTING |
5381				    SS_ISCONFIRMING |
5382				    SS_ISCONNECTED);
5383				if (error == 0) {
5384					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5385						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5386						error = ENOTCONN;
5387					}
5388				}
5389				goto out;
5390			}
5391		}
5392		error = sbwait(&so->so_rcv);
5393		if (error) {
5394			goto out;
5395		}
5396		held_length = 0;
5397		goto restart_nosblocks;
5398	} else if (so->so_rcv.sb_cc == 0) {
5399		if (so->so_error) {
5400			error = so->so_error;
5401			if ((in_flags & MSG_PEEK) == 0)
5402				so->so_error = 0;
5403		} else {
5404			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5405			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5406				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5407					/*
5408					 * For active open side clear flags
5409					 * for re-use passive open is
5410					 * blocked by connect.
5411					 */
5412					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5413						/*
5414						 * You were aborted, passive
5415						 * side always hits here
5416						 */
5417						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5418						error = ECONNRESET;
5419					}
5420					so->so_state &= ~(SS_ISCONNECTING |
5421					    SS_ISDISCONNECTING |
5422					    SS_ISCONFIRMING |
5423					    SS_ISCONNECTED);
5424					if (error == 0) {
5425						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5426							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5427							error = ENOTCONN;
5428						}
5429					}
5430					goto out;
5431				}
5432			}
5433			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5434			error = EWOULDBLOCK;
5435		}
5436		goto out;
5437	}
5438	if (hold_sblock == 1) {
5439		SOCKBUF_UNLOCK(&so->so_rcv);
5440		hold_sblock = 0;
5441	}
5442	/* we possibly have data we can read */
5443	/* sa_ignore FREED_MEMORY */
5444	control = TAILQ_FIRST(&inp->read_queue);
5445	if (control == NULL) {
5446		/*
5447		 * This could be happening since the appender did the
5448		 * increment but as not yet did the tailq insert onto the
5449		 * read_queue
5450		 */
5451		if (hold_rlock == 0) {
5452			SCTP_INP_READ_LOCK(inp);
5453		}
5454		control = TAILQ_FIRST(&inp->read_queue);
5455		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5456#ifdef INVARIANTS
5457			panic("Huh, its non zero and nothing on control?");
5458#endif
5459			so->so_rcv.sb_cc = 0;
5460		}
5461		SCTP_INP_READ_UNLOCK(inp);
5462		hold_rlock = 0;
5463		goto restart;
5464	}
5465	if ((control->length == 0) &&
5466	    (control->do_not_ref_stcb)) {
5467		/*
5468		 * Clean up code for freeing assoc that left behind a
5469		 * pdapi.. maybe a peer in EEOR that just closed after
5470		 * sending and never indicated a EOR.
5471		 */
5472		if (hold_rlock == 0) {
5473			hold_rlock = 1;
5474			SCTP_INP_READ_LOCK(inp);
5475		}
5476		control->held_length = 0;
5477		if (control->data) {
5478			/* Hmm there is data here .. fix */
5479			struct mbuf *m_tmp;
5480			int cnt = 0;
5481
5482			m_tmp = control->data;
5483			while (m_tmp) {
5484				cnt += SCTP_BUF_LEN(m_tmp);
5485				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5486					control->tail_mbuf = m_tmp;
5487					control->end_added = 1;
5488				}
5489				m_tmp = SCTP_BUF_NEXT(m_tmp);
5490			}
5491			control->length = cnt;
5492		} else {
5493			/* remove it */
5494			TAILQ_REMOVE(&inp->read_queue, control, next);
5495			/* Add back any hiddend data */
5496			sctp_free_remote_addr(control->whoFrom);
5497			sctp_free_a_readq(stcb, control);
5498		}
5499		if (hold_rlock) {
5500			hold_rlock = 0;
5501			SCTP_INP_READ_UNLOCK(inp);
5502		}
5503		goto restart;
5504	}
5505	if ((control->length == 0) &&
5506	    (control->end_added == 1)) {
5507		/*
5508		 * Do we also need to check for (control->pdapi_aborted ==
5509		 * 1)?
5510		 */
5511		if (hold_rlock == 0) {
5512			hold_rlock = 1;
5513			SCTP_INP_READ_LOCK(inp);
5514		}
5515		TAILQ_REMOVE(&inp->read_queue, control, next);
5516		if (control->data) {
5517#ifdef INVARIANTS
5518			panic("control->data not null but control->length == 0");
5519#else
5520			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5521			sctp_m_freem(control->data);
5522			control->data = NULL;
5523#endif
5524		}
5525		if (control->aux_data) {
5526			sctp_m_free(control->aux_data);
5527			control->aux_data = NULL;
5528		}
5529		sctp_free_remote_addr(control->whoFrom);
5530		sctp_free_a_readq(stcb, control);
5531		if (hold_rlock) {
5532			hold_rlock = 0;
5533			SCTP_INP_READ_UNLOCK(inp);
5534		}
5535		goto restart;
5536	}
5537	if (control->length == 0) {
5538		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5539		    (filling_sinfo)) {
5540			/* find a more suitable one then this */
5541			ctl = TAILQ_NEXT(control, next);
5542			while (ctl) {
5543				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5544				    (ctl->some_taken ||
5545				    (ctl->spec_flags & M_NOTIFICATION) ||
5546				    ((ctl->do_not_ref_stcb == 0) &&
5547				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5548				    ) {
5549					/*-
5550					 * If we have a different TCB next, and there is data
5551					 * present. If we have already taken some (pdapi), OR we can
5552					 * ref the tcb and no delivery as started on this stream, we
5553					 * take it. Note we allow a notification on a different
5554					 * assoc to be delivered..
5555					 */
5556					control = ctl;
5557					goto found_one;
5558				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5559					    (ctl->length) &&
5560					    ((ctl->some_taken) ||
5561					    ((ctl->do_not_ref_stcb == 0) &&
5562					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5563				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5564					/*-
5565					 * If we have the same tcb, and there is data present, and we
5566					 * have the strm interleave feature present. Then if we have
5567					 * taken some (pdapi) or we can refer to tht tcb AND we have
5568					 * not started a delivery for this stream, we can take it.
5569					 * Note we do NOT allow a notificaiton on the same assoc to
5570					 * be delivered.
5571					 */
5572					control = ctl;
5573					goto found_one;
5574				}
5575				ctl = TAILQ_NEXT(ctl, next);
5576			}
5577		}
5578		/*
5579		 * if we reach here, not suitable replacement is available
5580		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5581		 * into the our held count, and its time to sleep again.
5582		 */
5583		held_length = so->so_rcv.sb_cc;
5584		control->held_length = so->so_rcv.sb_cc;
5585		goto restart;
5586	}
5587	/* Clear the held length since there is something to read */
5588	control->held_length = 0;
5589	if (hold_rlock) {
5590		SCTP_INP_READ_UNLOCK(inp);
5591		hold_rlock = 0;
5592	}
5593found_one:
5594	/*
5595	 * If we reach here, control has a some data for us to read off.
5596	 * Note that stcb COULD be NULL.
5597	 */
5598	control->some_taken++;
5599	if (hold_sblock) {
5600		SOCKBUF_UNLOCK(&so->so_rcv);
5601		hold_sblock = 0;
5602	}
5603	stcb = control->stcb;
5604	if (stcb) {
5605		if ((control->do_not_ref_stcb == 0) &&
5606		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5607			if (freecnt_applied == 0)
5608				stcb = NULL;
5609		} else if (control->do_not_ref_stcb == 0) {
5610			/* you can't free it on me please */
5611			/*
5612			 * The lock on the socket buffer protects us so the
5613			 * free code will stop. But since we used the
5614			 * socketbuf lock and the sender uses the tcb_lock
5615			 * to increment, we need to use the atomic add to
5616			 * the refcnt
5617			 */
5618			if (freecnt_applied) {
5619#ifdef INVARIANTS
5620				panic("refcnt already incremented");
5621#else
5622				SCTP_PRINTF("refcnt already incremented?\n");
5623#endif
5624			} else {
5625				atomic_add_int(&stcb->asoc.refcnt, 1);
5626				freecnt_applied = 1;
5627			}
5628			/*
5629			 * Setup to remember how much we have not yet told
5630			 * the peer our rwnd has opened up. Note we grab the
5631			 * value from the tcb from last time. Note too that
5632			 * sack sending clears this when a sack is sent,
5633			 * which is fine. Once we hit the rwnd_req, we then
5634			 * will go to the sctp_user_rcvd() that will not
5635			 * lock until it KNOWs it MUST send a WUP-SACK.
5636			 */
5637			freed_so_far = stcb->freed_by_sorcv_sincelast;
5638			stcb->freed_by_sorcv_sincelast = 0;
5639		}
5640	}
5641	if (stcb &&
5642	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5643	    control->do_not_ref_stcb == 0) {
5644		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5645	}
5646	/* First lets get off the sinfo and sockaddr info */
5647	if ((sinfo) && filling_sinfo) {
5648		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5649		nxt = TAILQ_NEXT(control, next);
5650		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5651		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5652			struct sctp_extrcvinfo *s_extra;
5653
5654			s_extra = (struct sctp_extrcvinfo *)sinfo;
5655			if ((nxt) &&
5656			    (nxt->length)) {
5657				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5658				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5659					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5660				}
5661				if (nxt->spec_flags & M_NOTIFICATION) {
5662					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5663				}
5664				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5665				s_extra->sreinfo_next_length = nxt->length;
5666				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5667				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5668				if (nxt->tail_mbuf != NULL) {
5669					if (nxt->end_added) {
5670						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5671					}
5672				}
5673			} else {
5674				/*
5675				 * we explicitly 0 this, since the memcpy
5676				 * got some other things beyond the older
5677				 * sinfo_ that is on the control's structure
5678				 * :-D
5679				 */
5680				nxt = NULL;
5681				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5682				s_extra->sreinfo_next_aid = 0;
5683				s_extra->sreinfo_next_length = 0;
5684				s_extra->sreinfo_next_ppid = 0;
5685				s_extra->sreinfo_next_stream = 0;
5686			}
5687		}
5688		/*
5689		 * update off the real current cum-ack, if we have an stcb.
5690		 */
5691		if ((control->do_not_ref_stcb == 0) && stcb)
5692			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5693		/*
5694		 * mask off the high bits, we keep the actual chunk bits in
5695		 * there.
5696		 */
5697		sinfo->sinfo_flags &= 0x00ff;
5698		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5699			sinfo->sinfo_flags |= SCTP_UNORDERED;
5700		}
5701	}
5702#ifdef SCTP_ASOCLOG_OF_TSNS
5703	{
5704		int index, newindex;
5705		struct sctp_pcbtsn_rlog *entry;
5706
5707		do {
5708			index = inp->readlog_index;
5709			newindex = index + 1;
5710			if (newindex >= SCTP_READ_LOG_SIZE) {
5711				newindex = 0;
5712			}
5713		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5714		entry = &inp->readlog[index];
5715		entry->vtag = control->sinfo_assoc_id;
5716		entry->strm = control->sinfo_stream;
5717		entry->seq = control->sinfo_ssn;
5718		entry->sz = control->length;
5719		entry->flgs = control->sinfo_flags;
5720	}
5721#endif
5722	if ((fromlen > 0) && (from != NULL)) {
5723		union sctp_sockstore store;
5724		size_t len;
5725
5726		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5727#ifdef INET6
5728		case AF_INET6:
5729			len = sizeof(struct sockaddr_in6);
5730			store.sin6 = control->whoFrom->ro._l_addr.sin6;
5731			store.sin6.sin6_port = control->port_from;
5732			break;
5733#endif
5734#ifdef INET
5735		case AF_INET:
5736#ifdef INET6
5737			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
5738				len = sizeof(struct sockaddr_in6);
5739				in6_sin_2_v4mapsin6(&control->whoFrom->ro._l_addr.sin,
5740				    &store.sin6);
5741				store.sin6.sin6_port = control->port_from;
5742			} else {
5743				len = sizeof(struct sockaddr_in);
5744				store.sin = control->whoFrom->ro._l_addr.sin;
5745				store.sin.sin_port = control->port_from;
5746			}
5747#else
5748			len = sizeof(struct sockaddr_in);
5749			store.sin = control->whoFrom->ro._l_addr.sin;
5750			store.sin.sin_port = control->port_from;
5751#endif
5752			break;
5753#endif
5754		default:
5755			len = 0;
5756			break;
5757		}
5758		memcpy(from, &store, min((size_t)fromlen, len));
5759#ifdef INET6
5760		{
5761			struct sockaddr_in6 lsa6, *from6;
5762
5763			from6 = (struct sockaddr_in6 *)from;
5764			sctp_recover_scope_mac(from6, (&lsa6));
5765		}
5766#endif
5767	}
5768	/* now copy out what data we can */
5769	if (mp == NULL) {
5770		/* copy out each mbuf in the chain up to length */
5771get_more_data:
5772		m = control->data;
5773		while (m) {
5774			/* Move out all we can */
5775			cp_len = (int)uio->uio_resid;
5776			my_len = (int)SCTP_BUF_LEN(m);
5777			if (cp_len > my_len) {
5778				/* not enough in this buf */
5779				cp_len = my_len;
5780			}
5781			if (hold_rlock) {
5782				SCTP_INP_READ_UNLOCK(inp);
5783				hold_rlock = 0;
5784			}
5785			if (cp_len > 0)
5786				error = uiomove(mtod(m, char *), cp_len, uio);
5787			/* re-read */
5788			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5789				goto release;
5790			}
5791			if ((control->do_not_ref_stcb == 0) && stcb &&
5792			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5793				no_rcv_needed = 1;
5794			}
5795			if (error) {
5796				/* error we are out of here */
5797				goto release;
5798			}
5799			if ((SCTP_BUF_NEXT(m) == NULL) &&
5800			    (cp_len >= SCTP_BUF_LEN(m)) &&
5801			    ((control->end_added == 0) ||
5802			    (control->end_added &&
5803			    (TAILQ_NEXT(control, next) == NULL)))
5804			    ) {
5805				SCTP_INP_READ_LOCK(inp);
5806				hold_rlock = 1;
5807			}
5808			if (cp_len == SCTP_BUF_LEN(m)) {
5809				if ((SCTP_BUF_NEXT(m) == NULL) &&
5810				    (control->end_added)) {
5811					out_flags |= MSG_EOR;
5812					if ((control->do_not_ref_stcb == 0) &&
5813					    (control->stcb != NULL) &&
5814					    ((control->spec_flags & M_NOTIFICATION) == 0))
5815						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5816				}
5817				if (control->spec_flags & M_NOTIFICATION) {
5818					out_flags |= MSG_NOTIFICATION;
5819				}
5820				/* we ate up the mbuf */
5821				if (in_flags & MSG_PEEK) {
5822					/* just looking */
5823					m = SCTP_BUF_NEXT(m);
5824					copied_so_far += cp_len;
5825				} else {
5826					/* dispose of the mbuf */
5827					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5828						sctp_sblog(&so->so_rcv,
5829						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5830					}
5831					sctp_sbfree(control, stcb, &so->so_rcv, m);
5832					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5833						sctp_sblog(&so->so_rcv,
5834						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5835					}
5836					copied_so_far += cp_len;
5837					freed_so_far += cp_len;
5838					freed_so_far += MSIZE;
5839					atomic_subtract_int(&control->length, cp_len);
5840					control->data = sctp_m_free(m);
5841					m = control->data;
5842					/*
5843					 * been through it all, must hold sb
5844					 * lock ok to null tail
5845					 */
5846					if (control->data == NULL) {
5847#ifdef INVARIANTS
5848						if ((control->end_added == 0) ||
5849						    (TAILQ_NEXT(control, next) == NULL)) {
5850							/*
5851							 * If the end is not
5852							 * added, OR the
5853							 * next is NOT null
5854							 * we MUST have the
5855							 * lock.
5856							 */
5857							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5858								panic("Hmm we don't own the lock?");
5859							}
5860						}
5861#endif
5862						control->tail_mbuf = NULL;
5863#ifdef INVARIANTS
5864						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5865							panic("end_added, nothing left and no MSG_EOR");
5866						}
5867#endif
5868					}
5869				}
5870			} else {
5871				/* Do we need to trim the mbuf? */
5872				if (control->spec_flags & M_NOTIFICATION) {
5873					out_flags |= MSG_NOTIFICATION;
5874				}
5875				if ((in_flags & MSG_PEEK) == 0) {
5876					SCTP_BUF_RESV_UF(m, cp_len);
5877					SCTP_BUF_LEN(m) -= cp_len;
5878					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5879						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5880					}
5881					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5882					if ((control->do_not_ref_stcb == 0) &&
5883					    stcb) {
5884						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5885					}
5886					copied_so_far += cp_len;
5887					freed_so_far += cp_len;
5888					freed_so_far += MSIZE;
5889					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5890						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5891						    SCTP_LOG_SBRESULT, 0);
5892					}
5893					atomic_subtract_int(&control->length, cp_len);
5894				} else {
5895					copied_so_far += cp_len;
5896				}
5897			}
5898			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5899				break;
5900			}
5901			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5902			    (control->do_not_ref_stcb == 0) &&
5903			    (freed_so_far >= rwnd_req)) {
5904				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5905			}
5906		}		/* end while(m) */
5907		/*
5908		 * At this point we have looked at it all and we either have
5909		 * a MSG_EOR/or read all the user wants... <OR>
5910		 * control->length == 0.
5911		 */
5912		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5913			/* we are done with this control */
5914			if (control->length == 0) {
5915				if (control->data) {
5916#ifdef INVARIANTS
5917					panic("control->data not null at read eor?");
5918#else
5919					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5920					sctp_m_freem(control->data);
5921					control->data = NULL;
5922#endif
5923				}
5924		done_with_control:
5925				if (TAILQ_NEXT(control, next) == NULL) {
5926					/*
5927					 * If we don't have a next we need a
5928					 * lock, if there is a next
5929					 * interrupt is filling ahead of us
5930					 * and we don't need a lock to
5931					 * remove this guy (which is the
5932					 * head of the queue).
5933					 */
5934					if (hold_rlock == 0) {
5935						SCTP_INP_READ_LOCK(inp);
5936						hold_rlock = 1;
5937					}
5938				}
5939				TAILQ_REMOVE(&inp->read_queue, control, next);
5940				/* Add back any hiddend data */
5941				if (control->held_length) {
5942					held_length = 0;
5943					control->held_length = 0;
5944					wakeup_read_socket = 1;
5945				}
5946				if (control->aux_data) {
5947					sctp_m_free(control->aux_data);
5948					control->aux_data = NULL;
5949				}
5950				no_rcv_needed = control->do_not_ref_stcb;
5951				sctp_free_remote_addr(control->whoFrom);
5952				control->data = NULL;
5953				sctp_free_a_readq(stcb, control);
5954				control = NULL;
5955				if ((freed_so_far >= rwnd_req) &&
5956				    (no_rcv_needed == 0))
5957					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5958
5959			} else {
5960				/*
5961				 * The user did not read all of this
5962				 * message, turn off the returned MSG_EOR
5963				 * since we are leaving more behind on the
5964				 * control to read.
5965				 */
5966#ifdef INVARIANTS
5967				if (control->end_added &&
5968				    (control->data == NULL) &&
5969				    (control->tail_mbuf == NULL)) {
5970					panic("Gak, control->length is corrupt?");
5971				}
5972#endif
5973				no_rcv_needed = control->do_not_ref_stcb;
5974				out_flags &= ~MSG_EOR;
5975			}
5976		}
5977		if (out_flags & MSG_EOR) {
5978			goto release;
5979		}
5980		if ((uio->uio_resid == 0) ||
5981		    ((in_eeor_mode) &&
5982		    (copied_so_far >= (uint32_t) max(so->so_rcv.sb_lowat, 1)))) {
5983			goto release;
5984		}
5985		/*
5986		 * If I hit here the receiver wants more and this message is
5987		 * NOT done (pd-api). So two questions. Can we block? if not
5988		 * we are done. Did the user NOT set MSG_WAITALL?
5989		 */
5990		if (block_allowed == 0) {
5991			goto release;
5992		}
5993		/*
5994		 * We need to wait for more data a few things: - We don't
5995		 * sbunlock() so we don't get someone else reading. - We
5996		 * must be sure to account for the case where what is added
5997		 * is NOT to our control when we wakeup.
5998		 */
5999
6000		/*
6001		 * Do we need to tell the transport a rwnd update might be
6002		 * needed before we go to sleep?
6003		 */
6004		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
6005		    ((freed_so_far >= rwnd_req) &&
6006		    (control->do_not_ref_stcb == 0) &&
6007		    (no_rcv_needed == 0))) {
6008			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6009		}
6010wait_some_more:
6011		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
6012			goto release;
6013		}
6014		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
6015			goto release;
6016
6017		if (hold_rlock == 1) {
6018			SCTP_INP_READ_UNLOCK(inp);
6019			hold_rlock = 0;
6020		}
6021		if (hold_sblock == 0) {
6022			SOCKBUF_LOCK(&so->so_rcv);
6023			hold_sblock = 1;
6024		}
6025		if ((copied_so_far) && (control->length == 0) &&
6026		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
6027			goto release;
6028		}
6029		if (so->so_rcv.sb_cc <= control->held_length) {
6030			error = sbwait(&so->so_rcv);
6031			if (error) {
6032				goto release;
6033			}
6034			control->held_length = 0;
6035		}
6036		if (hold_sblock) {
6037			SOCKBUF_UNLOCK(&so->so_rcv);
6038			hold_sblock = 0;
6039		}
6040		if (control->length == 0) {
6041			/* still nothing here */
6042			if (control->end_added == 1) {
6043				/* he aborted, or is done i.e.did a shutdown */
6044				out_flags |= MSG_EOR;
6045				if (control->pdapi_aborted) {
6046					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6047						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6048
6049					out_flags |= MSG_TRUNC;
6050				} else {
6051					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
6052						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6053				}
6054				goto done_with_control;
6055			}
6056			if (so->so_rcv.sb_cc > held_length) {
6057				control->held_length = so->so_rcv.sb_cc;
6058				held_length = 0;
6059			}
6060			goto wait_some_more;
6061		} else if (control->data == NULL) {
6062			/*
6063			 * we must re-sync since data is probably being
6064			 * added
6065			 */
6066			SCTP_INP_READ_LOCK(inp);
6067			if ((control->length > 0) && (control->data == NULL)) {
6068				/*
6069				 * big trouble.. we have the lock and its
6070				 * corrupt?
6071				 */
6072#ifdef INVARIANTS
6073				panic("Impossible data==NULL length !=0");
6074#endif
6075				out_flags |= MSG_EOR;
6076				out_flags |= MSG_TRUNC;
6077				control->length = 0;
6078				SCTP_INP_READ_UNLOCK(inp);
6079				goto done_with_control;
6080			}
6081			SCTP_INP_READ_UNLOCK(inp);
6082			/* We will fall around to get more data */
6083		}
6084		goto get_more_data;
6085	} else {
6086		/*-
6087		 * Give caller back the mbuf chain,
6088		 * store in uio_resid the length
6089		 */
6090		wakeup_read_socket = 0;
6091		if ((control->end_added == 0) ||
6092		    (TAILQ_NEXT(control, next) == NULL)) {
6093			/* Need to get rlock */
6094			if (hold_rlock == 0) {
6095				SCTP_INP_READ_LOCK(inp);
6096				hold_rlock = 1;
6097			}
6098		}
6099		if (control->end_added) {
6100			out_flags |= MSG_EOR;
6101			if ((control->do_not_ref_stcb == 0) &&
6102			    (control->stcb != NULL) &&
6103			    ((control->spec_flags & M_NOTIFICATION) == 0))
6104				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6105		}
6106		if (control->spec_flags & M_NOTIFICATION) {
6107			out_flags |= MSG_NOTIFICATION;
6108		}
6109		uio->uio_resid = control->length;
6110		*mp = control->data;
6111		m = control->data;
6112		while (m) {
6113			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6114				sctp_sblog(&so->so_rcv,
6115				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6116			}
6117			sctp_sbfree(control, stcb, &so->so_rcv, m);
6118			freed_so_far += SCTP_BUF_LEN(m);
6119			freed_so_far += MSIZE;
6120			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6121				sctp_sblog(&so->so_rcv,
6122				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6123			}
6124			m = SCTP_BUF_NEXT(m);
6125		}
6126		control->data = control->tail_mbuf = NULL;
6127		control->length = 0;
6128		if (out_flags & MSG_EOR) {
6129			/* Done with this control */
6130			goto done_with_control;
6131		}
6132	}
6133release:
6134	if (hold_rlock == 1) {
6135		SCTP_INP_READ_UNLOCK(inp);
6136		hold_rlock = 0;
6137	}
6138	if (hold_sblock == 1) {
6139		SOCKBUF_UNLOCK(&so->so_rcv);
6140		hold_sblock = 0;
6141	}
6142	sbunlock(&so->so_rcv);
6143	sockbuf_lock = 0;
6144
6145release_unlocked:
6146	if (hold_sblock) {
6147		SOCKBUF_UNLOCK(&so->so_rcv);
6148		hold_sblock = 0;
6149	}
6150	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6151		if ((freed_so_far >= rwnd_req) &&
6152		    (control && (control->do_not_ref_stcb == 0)) &&
6153		    (no_rcv_needed == 0))
6154			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6155	}
6156out:
6157	if (msg_flags) {
6158		*msg_flags = out_flags;
6159	}
6160	if (((out_flags & MSG_EOR) == 0) &&
6161	    ((in_flags & MSG_PEEK) == 0) &&
6162	    (sinfo) &&
6163	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6164	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6165		struct sctp_extrcvinfo *s_extra;
6166
6167		s_extra = (struct sctp_extrcvinfo *)sinfo;
6168		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6169	}
6170	if (hold_rlock == 1) {
6171		SCTP_INP_READ_UNLOCK(inp);
6172	}
6173	if (hold_sblock) {
6174		SOCKBUF_UNLOCK(&so->so_rcv);
6175	}
6176	if (sockbuf_lock) {
6177		sbunlock(&so->so_rcv);
6178	}
6179	if (freecnt_applied) {
6180		/*
6181		 * The lock on the socket buffer protects us so the free
6182		 * code will stop. But since we used the socketbuf lock and
6183		 * the sender uses the tcb_lock to increment, we need to use
6184		 * the atomic add to the refcnt.
6185		 */
6186		if (stcb == NULL) {
6187#ifdef INVARIANTS
6188			panic("stcb for refcnt has gone NULL?");
6189			goto stage_left;
6190#else
6191			goto stage_left;
6192#endif
6193		}
6194		atomic_add_int(&stcb->asoc.refcnt, -1);
6195		/* Save the value back for next time */
6196		stcb->freed_by_sorcv_sincelast = freed_so_far;
6197	}
6198	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6199		if (stcb) {
6200			sctp_misc_ints(SCTP_SORECV_DONE,
6201			    freed_so_far,
6202			    ((uio) ? (slen - uio->uio_resid) : slen),
6203			    stcb->asoc.my_rwnd,
6204			    so->so_rcv.sb_cc);
6205		} else {
6206			sctp_misc_ints(SCTP_SORECV_DONE,
6207			    freed_so_far,
6208			    ((uio) ? (slen - uio->uio_resid) : slen),
6209			    0,
6210			    so->so_rcv.sb_cc);
6211		}
6212	}
6213stage_left:
6214	if (wakeup_read_socket) {
6215		sctp_sorwakeup(inp, so);
6216	}
6217	return (error);
6218}
6219
6220
6221#ifdef SCTP_MBUF_LOGGING
6222struct mbuf *
6223sctp_m_free(struct mbuf *m)
6224{
6225	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6226		sctp_log_mb(m, SCTP_MBUF_IFREE);
6227	}
6228	return (m_free(m));
6229}
6230
6231void
6232sctp_m_freem(struct mbuf *mb)
6233{
6234	while (mb != NULL)
6235		mb = sctp_m_free(mb);
6236}
6237
6238#endif
6239
6240int
6241sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6242{
6243	/*
6244	 * Given a local address. For all associations that holds the
6245	 * address, request a peer-set-primary.
6246	 */
6247	struct sctp_ifa *ifa;
6248	struct sctp_laddr *wi;
6249
6250	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6251	if (ifa == NULL) {
6252		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6253		return (EADDRNOTAVAIL);
6254	}
6255	/*
6256	 * Now that we have the ifa we must awaken the iterator with this
6257	 * message.
6258	 */
6259	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6260	if (wi == NULL) {
6261		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6262		return (ENOMEM);
6263	}
6264	/* Now incr the count and int wi structure */
6265	SCTP_INCR_LADDR_COUNT();
6266	bzero(wi, sizeof(*wi));
6267	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6268	wi->ifa = ifa;
6269	wi->action = SCTP_SET_PRIM_ADDR;
6270	atomic_add_int(&ifa->refcount, 1);
6271
6272	/* Now add it to the work queue */
6273	SCTP_WQ_ADDR_LOCK();
6274	/*
6275	 * Should this really be a tailq? As it is we will process the
6276	 * newest first :-0
6277	 */
6278	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6279	SCTP_WQ_ADDR_UNLOCK();
6280	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6281	    (struct sctp_inpcb *)NULL,
6282	    (struct sctp_tcb *)NULL,
6283	    (struct sctp_nets *)NULL);
6284	return (0);
6285}
6286
6287
/*
 * Protocol-specific soreceive() entry point for SCTP sockets.
 * Thin wrapper around sctp_sorecvmsg(): decides whether ancillary
 * (sndrcv/rcvinfo) data is wanted, receives the message, then copies
 * back the sender address (*psa) and a CMSG-formatted sinfo (*controlp)
 * to the caller.  Returns 0 or an errno value.
 */
int
sctp_soreceive(struct socket *so,
    struct sockaddr **psa,
    struct uio *uio,
    struct mbuf **mp0,
    struct mbuf **controlp,
    int *flagsp)
{
	int error, fromlen;
	/*
	 * Scratch space for the peer address.
	 * NOTE(review): casting a uint8_t array to struct sockaddr relies
	 * on stack alignment being sufficient — presumably fine on all
	 * supported platforms, but a union of sockaddr types would be
	 * cleaner; confirm before touching.
	 */
	uint8_t sockbuf[256];
	struct sockaddr *from;
	struct sctp_extrcvinfo sinfo;
	int filling_sinfo = 1;
	struct sctp_inpcb *inp;

	inp = (struct sctp_inpcb *)so->so_pcb;
	/* pickup the assoc we are reading from */
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
	    (controlp == NULL)) {
		/* user does not want the sndrcv ctl */
		filling_sinfo = 0;
	}
	if (psa) {
		from = (struct sockaddr *)sockbuf;
		fromlen = sizeof(sockbuf);
		/* sa_len == 0 marks "no address filled in yet". */
		from->sa_len = 0;
	} else {
		from = NULL;
		fromlen = 0;
	}

	if (filling_sinfo) {
		memset(&sinfo, 0, sizeof(struct sctp_extrcvinfo));
	}
	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
	if (controlp != NULL) {
		/* copy back the sinfo in a CMSG format */
		if (filling_sinfo)
			*controlp = sctp_build_ctl_nchunk(inp,
			    (struct sctp_sndrcvinfo *)&sinfo);
		else
			*controlp = NULL;
	}
	if (psa) {
		/* copy back the address info */
		if (from && from->sa_len) {
			/* M_NOWAIT: a failed dup simply yields *psa == NULL. */
			*psa = sodupsockaddr(from, M_NOWAIT);
		} else {
			*psa = NULL;
		}
	}
	return (error);
}
6348
6349
6350
6351
6352
/*
 * Add a packed list of totaddr addresses (as used by sctp_connectx())
 * to an existing association.  On any invalid address or failure to
 * add, the association is FREED (sctp_free_assoc) and *error is set;
 * the caller must not touch stcb after a non-zero *error.  Returns the
 * number of addresses successfully added.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_9);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
				    SCTP_FROM_SCTPUTIL + SCTP_LOC_10);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves incr at its
			 * previous value (0 on the first iteration), so sa
			 * does not advance past the unknown entry — presumably
			 * callers validate the list beforehand via
			 * sctp_connectx_helper_find(); confirm.
			 */
			break;
		}
		/* Step to the next packed sockaddr in the caller's buffer. */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6432
6433struct sctp_tcb *
6434sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6435    int *totaddr, int *num_v4, int *num_v6, int *error,
6436    int limit, int *bad_addr)
6437{
6438	struct sockaddr *sa;
6439	struct sctp_tcb *stcb = NULL;
6440	size_t incr, at, i;
6441
6442	at = incr = 0;
6443	sa = addr;
6444
6445	*error = *num_v6 = *num_v4 = 0;
6446	/* account and validate addresses */
6447	for (i = 0; i < (size_t)*totaddr; i++) {
6448		switch (sa->sa_family) {
6449#ifdef INET
6450		case AF_INET:
6451			(*num_v4) += 1;
6452			incr = sizeof(struct sockaddr_in);
6453			if (sa->sa_len != incr) {
6454				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6455				*error = EINVAL;
6456				*bad_addr = 1;
6457				return (NULL);
6458			}
6459			break;
6460#endif
6461#ifdef INET6
6462		case AF_INET6:
6463			{
6464				struct sockaddr_in6 *sin6;
6465
6466				sin6 = (struct sockaddr_in6 *)sa;
6467				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6468					/* Must be non-mapped for connectx */
6469					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6470					*error = EINVAL;
6471					*bad_addr = 1;
6472					return (NULL);
6473				}
6474				(*num_v6) += 1;
6475				incr = sizeof(struct sockaddr_in6);
6476				if (sa->sa_len != incr) {
6477					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6478					*error = EINVAL;
6479					*bad_addr = 1;
6480					return (NULL);
6481				}
6482				break;
6483			}
6484#endif
6485		default:
6486			*totaddr = i;
6487			/* we are done */
6488			break;
6489		}
6490		if (i == (size_t)*totaddr) {
6491			break;
6492		}
6493		SCTP_INP_INCR_REF(inp);
6494		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6495		if (stcb != NULL) {
6496			/* Already have or am bring up an association */
6497			return (stcb);
6498		} else {
6499			SCTP_INP_DECR_REF(inp);
6500		}
6501		if ((at + incr) > (size_t)limit) {
6502			*totaddr = i;
6503			break;
6504		}
6505		sa = (struct sockaddr *)((caddr_t)sa + incr);
6506	}
6507	return ((struct sctp_tcb *)NULL);
6508}
6509
6510/*
6511 * sctp_bindx(ADD) for one address.
6512 * assumes all arguments are valid/checked by caller.
6513 */
/*
 * sctp_bindx(ADD) for one address: validate the address against the
 * endpoint's family/flag constraints, then either perform an initial
 * bind (if the endpoint is still unbound) or add the address to the
 * endpoint's address list.  *error receives 0 or an errno value.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped address to a plain IPv4
			 * sockaddr and use that from here on.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this first ADD acts as a bind(). */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Does another endpoint already own this address/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Unused: zero the port before the address add. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6641
6642/*
6643 * sctp_bindx(DELETE) for one address.
6644 * assumes all arguments are valid/checked by caller.
6645 */
/*
 * sctp_bindx(DELETE) for one address: validate the address against the
 * endpoint's family/flag constraints, then remove it from the
 * endpoint's address list.  *error receives 0 or an errno value.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#if defined(INET) && defined(INET6)
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
#ifdef INET
		struct sockaddr_in6 *sin6;

#endif

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
#ifdef INET
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped address to a plain IPv4
			 * sockaddr and use that from here on.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
#endif
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6731
6732/*
6733 * returns the valid local address count for an assoc, taking into account
6734 * all scoping rules
6735 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, private-IPv4,
	 * link/site-local IPv6) and the endpoint's jail restrictions.
	 * Runs under the global address read lock.
	 */
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = &sctp_ifa->address.sin;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* Skip addresses hidden from this jail. */
						if (prison_check_ip4(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin->sin_addr) != 0) {
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = &sctp_ifa->address.sin6;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						/* Skip addresses hidden from this jail. */
						if (prison_check_ip6(stcb->sctp_ep->ip_inp.inp.inp_cred,
						    &sin6->sin6_addr) != 0) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6881
6882#if defined(SCTP_LOCAL_TRACE_BUF)
6883
6884void
6885sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6886{
6887	uint32_t saveindex, newindex;
6888
6889	do {
6890		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6891		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6892			newindex = 1;
6893		} else {
6894			newindex = saveindex + 1;
6895		}
6896	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6897	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6898		saveindex = 0;
6899	}
6900	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6901	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6902	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6903	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6904	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6905	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6906	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6907	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6908}
6909
6910#endif
/*
 * Callback for UDP-encapsulated SCTP (udp_set_kernel_tunneling hook):
 * strip the UDP header out of the packet, fix up the IP length field,
 * and hand the packet to the normal SCTP input path, passing along the
 * peer's UDP source port.  Consumes (or forwards) the mbuf chain.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *inp,
    const struct sockaddr *sa SCTP_UNUSED, void *ctx SCTP_UNUSED)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	/* Kept in network byte order, as sctp_input_with_port() expects. */
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	/* Shrink the IP payload length by the removed UDP header. */
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6983
void
sctp_over_udp_stop(void)
{
	/*
	 * Tear down the kernel UDP tunneling sockets (IPv4 and/or IPv6).
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}
7004
/*
 * Bring up the kernel UDP tunneling sockets on the sysctl-configured
 * port: create a UDP socket per address family, register the SCTP
 * decapsulation hook and bind the socket.  Any failure tears down both
 * sockets via sctp_over_udp_stop().  Returns 0 or an errno value.
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/*
	 * NOTE(review): ntohs() here is a byte-order no-op as far as the
	 * zero test goes (0 is 0 in either order); whether the sysctl
	 * stores host or network order cannot be told from this file.
	 */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet, NULL))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	return (0);
}
7090