sctputil.c: r166675 (deleted) vs. r167598 (added)
1/*-
2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */
32
33#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $ */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 166675 2007-02-12 23:24:31Z rrs $");
34__FBSDID("$FreeBSD: head/sys/netinet/sctputil.c 167598 2007-03-15 11:27:14Z rrs $");
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
35
36#include <netinet/sctp_os.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctputil.h>
39#include <netinet/sctp_var.h>
40#include <netinet/sctp_sysctl.h>
40#ifdef INET6
41#include <netinet6/sctp6_var.h>
42#endif
43#include <netinet/sctp_header.h>
44#include <netinet/sctp_output.h>
45#include <netinet/sctp_uio.h>
46#include <netinet/sctp_timer.h>
47#include <netinet/sctp_crc32.h>
 48#include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_asconf.h>
41#ifdef INET6
42#include <netinet6/sctp6_var.h>
43#endif
44#include <netinet/sctp_header.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_uio.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_crc32.h>
 49#include <netinet/sctp_indata.h> /* for sctp_deliver_data() */
50#include <netinet/sctp_auth.h>
51#include <netinet/sctp_asconf.h>
52#include <netinet/sctp_bsd_addr.h>
51
53
52extern int sctp_warm_the_crc32_table;
53
54#define NUMBER_OF_MTU_SIZES 18
55
54#define NUMBER_OF_MTU_SIZES 18
55
56#ifdef SCTP_DEBUG
57extern uint32_t sctp_debug_on;
58
56
59#endif
60
61
62#ifdef SCTP_STAT_LOGGING
63int global_sctp_cwnd_log_at = 0;
64int global_sctp_cwnd_log_rolled = 0;
65struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
66
67static uint32_t
68sctp_get_time_of_event(void)
69{
70 struct timeval now;
71 uint32_t timeval;
72
73 SCTP_GETPTIME_TIMEVAL(&now);
74 timeval = (now.tv_sec % 0x00000fff);
75 timeval <<= 20;
76 timeval |= now.tv_usec & 0xfffff;
77 return (timeval);
78}
79
80
81void
82sctp_clr_stat_log(void)
83{
84 global_sctp_cwnd_log_at = 0;
85 global_sctp_cwnd_log_rolled = 0;
86}
87
88
89void
90sctp_sblog(struct sockbuf *sb,
91 struct sctp_tcb *stcb, int from, int incr)
92{
93 int sctp_cwnd_log_at;
94
95 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
96 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
97 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
98 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB;
99 sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb;
100 sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc;
101 if (stcb)
102 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc;
103 else
104 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0;
105 sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr;
106}
107
108void
109sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
110{
111 int sctp_cwnd_log_at;
112
113 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
114 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
115 sctp_clog[sctp_cwnd_log_at].from = 0;
116 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE;
117 sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp;
118 sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags;
119 if (stcb) {
120 sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb;
121 sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state;
122 } else {
123 sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0;
124 sctp_clog[sctp_cwnd_log_at].x.close.state = 0;
125 }
126 sctp_clog[sctp_cwnd_log_at].x.close.loc = loc;
127}
128
129
130void
131rto_logging(struct sctp_nets *net, int from)
132{
133 int sctp_cwnd_log_at;
134
135 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
136 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
137 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
138 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
139 sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net;
140 sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
141 sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance;
142 sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir;
143}
144
145void
146sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
147{
148 int sctp_cwnd_log_at;
149
150 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
151 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
152 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
153 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
154 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb;
155 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
156 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
157 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
158 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
159 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream;
160}
161
162void
163sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
164{
165 int sctp_cwnd_log_at;
166
167 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
168 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
169 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action;
170 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE;
171 sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb;
172 sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight;
173 sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
174 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
175 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count;
176}
177
178
179void
180sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
181{
182 int sctp_cwnd_log_at;
183
184 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
185 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
186 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
187 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK;
188 sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack;
189 sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack;
190 sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn;
191 sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps;
192 sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups;
193}
194
195void
196sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
197{
198 int sctp_cwnd_log_at;
199
200 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
201 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
202 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
203 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP;
204 sctp_clog[sctp_cwnd_log_at].x.map.base = map;
205 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
206 sctp_clog[sctp_cwnd_log_at].x.map.high = high;
207}
208
209void
210sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
211 int from)
212{
213 int sctp_cwnd_log_at;
214
215 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
216 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
217 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
218 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR;
219 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
220 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
221 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
222}
223
224
225void
226sctp_log_mb(struct mbuf *m, int from)
227{
228 int sctp_cwnd_log_at;
229
230 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
231 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
232 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
233 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF;
234 sctp_clog[sctp_cwnd_log_at].x.mb.mp = m;
235 sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
236 sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
237 sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0);
238 if (SCTP_BUF_IS_EXTENDED(m)) {
239 sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
241 } else {
242 sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0;
243 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0;
244 }
245}
246
247
248void
249sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250 int from)
251{
252 int sctp_cwnd_log_at;
253
254 if (control == NULL) {
255 printf("Gak log of NULL?\n");
256 return;
257 }
258 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
259 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
260 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
261 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
262 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb;
263 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn;
264 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn;
265 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream;
266 if (poschk != NULL) {
267 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn;
268 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn;
269 } else {
270 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
271 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
272 }
273}
274
275void
276sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
277{
278 int sctp_cwnd_log_at;
279
280 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
281 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
282 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
283 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND;
284 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
285 if (stcb->asoc.send_queue_cnt > 255)
286 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
287 else
288 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 if (stcb->asoc.stream_queue_cnt > 255)
290 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
291 else
292 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293
294 if (net) {
295 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
296 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
297 sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 }
301 if (SCTP_CWNDLOG_PRESEND == from) {
302 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 }
304 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
305}
306
307void
308sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
309{
310 int sctp_cwnd_log_at;
311
312 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
313 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
314 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
315 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT;
316 if (inp) {
317 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket;
318
319 } else {
320 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL;
321 }
322 sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp;
323 if (stcb) {
324 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
325 } else {
326 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
327 }
328 if (inp) {
329 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
330 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
331 } else {
332 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
333 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN;
334 }
335 sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx);
336 if (inp->sctp_socket) {
337 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
338 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
339 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
340 } else {
341 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
342 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
343 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
344 }
345}
346
347void
348sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
349{
350 int sctp_cwnd_log_at;
351
352 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
353 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
354 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
355 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST;
356 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
357 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
358 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
359 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
360 if (stcb->asoc.send_queue_cnt > 255)
361 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
362 else
363 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
364 if (stcb->asoc.stream_queue_cnt > 255)
365 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
366 else
367 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
368}
369
370void
371sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
372{
373 int sctp_cwnd_log_at;
374
375 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
376 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
377 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
378 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
379 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
380 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
381 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
382 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
383}
384
385void
386sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
387{
388 int sctp_cwnd_log_at;
389
390 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
391 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
392 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
393 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
394 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
395 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
396 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
397 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
398}
399
400void
401sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
402{
403 int sctp_cwnd_log_at;
404
405 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
406 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
407 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
408 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT;
409 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
410 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
411 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
412 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
413}
414
415void
416sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
417{
418 int sctp_cwnd_log_at;
419
420 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
421 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
422 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
423 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT;
424 sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a;
425 sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b;
426 sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c;
427 sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d;
428}
429
430void
431sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
432{
433 int sctp_cwnd_log_at;
434
435 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
436 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
437 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
438 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE;
439 sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb;
440 sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt;
441 sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count;
442 sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt;
443 sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt;
444
445 if (stcb->asoc.stream_queue_cnt < 0xff)
446 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
447 else
448 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff;
449
450 if (stcb->asoc.chunks_on_out_queue < 0xff)
451 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
452 else
453 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff;
454
455 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0;
 456 /* set in the deferred mode stuff */
457 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
458 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1;
459 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
460 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2;
461 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
462 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4;
463 /* what about the sb */
464 if (stcb->sctp_socket) {
465 struct socket *so = stcb->sctp_socket;
466
467 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
468 } else {
469 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff;
470 }
471}
472
473void
474sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
475{
476 int sctp_cwnd_log_at;
477
478 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
479 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
480 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
481 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK;
482 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
483 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
484 sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd;
485 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
486 sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
487 sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
488 sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen;
489}
490
491int
57#ifdef SCTP_STAT_LOGGING
58int global_sctp_cwnd_log_at = 0;
59int global_sctp_cwnd_log_rolled = 0;
60struct sctp_cwnd_log sctp_clog[SCTP_STAT_LOG_SIZE];
61
62static uint32_t
63sctp_get_time_of_event(void)
64{
65 struct timeval now;
66 uint32_t timeval;
67
68 SCTP_GETPTIME_TIMEVAL(&now);
69 timeval = (now.tv_sec % 0x00000fff);
70 timeval <<= 20;
71 timeval |= now.tv_usec & 0xfffff;
72 return (timeval);
73}
74
75
76void
77sctp_clr_stat_log(void)
78{
79 global_sctp_cwnd_log_at = 0;
80 global_sctp_cwnd_log_rolled = 0;
81}
82
83
84void
85sctp_sblog(struct sockbuf *sb,
86 struct sctp_tcb *stcb, int from, int incr)
87{
88 int sctp_cwnd_log_at;
89
90 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
91 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
92 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
93 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SB;
94 sctp_clog[sctp_cwnd_log_at].x.sb.stcb = stcb;
95 sctp_clog[sctp_cwnd_log_at].x.sb.so_sbcc = sb->sb_cc;
96 if (stcb)
97 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = stcb->asoc.sb_cc;
98 else
99 sctp_clog[sctp_cwnd_log_at].x.sb.stcb_sbcc = 0;
100 sctp_clog[sctp_cwnd_log_at].x.sb.incr = incr;
101}
102
103void
104sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
105{
106 int sctp_cwnd_log_at;
107
108 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
109 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
110 sctp_clog[sctp_cwnd_log_at].from = 0;
111 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CLOSE;
112 sctp_clog[sctp_cwnd_log_at].x.close.inp = (void *)inp;
113 sctp_clog[sctp_cwnd_log_at].x.close.sctp_flags = inp->sctp_flags;
114 if (stcb) {
115 sctp_clog[sctp_cwnd_log_at].x.close.stcb = (void *)stcb;
116 sctp_clog[sctp_cwnd_log_at].x.close.state = (uint16_t) stcb->asoc.state;
117 } else {
118 sctp_clog[sctp_cwnd_log_at].x.close.stcb = 0;
119 sctp_clog[sctp_cwnd_log_at].x.close.state = 0;
120 }
121 sctp_clog[sctp_cwnd_log_at].x.close.loc = loc;
122}
123
124
125void
126rto_logging(struct sctp_nets *net, int from)
127{
128 int sctp_cwnd_log_at;
129
130 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
131 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
132 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
133 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
134 sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net;
135 sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
136 sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance;
137 sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir;
138}
139
140void
141sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
142{
143 int sctp_cwnd_log_at;
144
145 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
146 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
147 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
148 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
149 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = stcb;
150 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = tsn;
151 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = sseq;
152 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
153 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
154 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = stream;
155}
156
157void
158sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
159{
160 int sctp_cwnd_log_at;
161
162 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
163 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
164 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) action;
165 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_NAGLE;
166 sctp_clog[sctp_cwnd_log_at].x.nagle.stcb = (void *)stcb;
167 sctp_clog[sctp_cwnd_log_at].x.nagle.total_flight = stcb->asoc.total_flight;
168 sctp_clog[sctp_cwnd_log_at].x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
169 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
170 sctp_clog[sctp_cwnd_log_at].x.nagle.count_in_flight = stcb->asoc.total_flight_count;
171}
172
173
174void
175sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
176{
177 int sctp_cwnd_log_at;
178
179 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
180 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
181 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
182 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_SACK;
183 sctp_clog[sctp_cwnd_log_at].x.sack.cumack = cumack;
184 sctp_clog[sctp_cwnd_log_at].x.sack.oldcumack = old_cumack;
185 sctp_clog[sctp_cwnd_log_at].x.sack.tsn = tsn;
186 sctp_clog[sctp_cwnd_log_at].x.sack.numGaps = gaps;
187 sctp_clog[sctp_cwnd_log_at].x.sack.numDups = dups;
188}
189
190void
191sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192{
193 int sctp_cwnd_log_at;
194
195 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
196 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
197 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
198 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAP;
199 sctp_clog[sctp_cwnd_log_at].x.map.base = map;
200 sctp_clog[sctp_cwnd_log_at].x.map.cum = cum;
201 sctp_clog[sctp_cwnd_log_at].x.map.high = high;
202}
203
204void
205sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
206 int from)
207{
208 int sctp_cwnd_log_at;
209
210 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
211 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
212 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
213 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_FR;
214 sctp_clog[sctp_cwnd_log_at].x.fr.largest_tsn = biggest_tsn;
215 sctp_clog[sctp_cwnd_log_at].x.fr.largest_new_tsn = biggest_new_tsn;
216 sctp_clog[sctp_cwnd_log_at].x.fr.tsn = tsn;
217}
218
219
220void
221sctp_log_mb(struct mbuf *m, int from)
222{
223 int sctp_cwnd_log_at;
224
225 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
226 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
227 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
228 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBUF;
229 sctp_clog[sctp_cwnd_log_at].x.mb.mp = m;
230 sctp_clog[sctp_cwnd_log_at].x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 sctp_clog[sctp_cwnd_log_at].x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 sctp_clog[sctp_cwnd_log_at].x.mb.data = SCTP_BUF_AT(m, 0);
233 if (SCTP_BUF_IS_EXTENDED(m)) {
234 sctp_clog[sctp_cwnd_log_at].x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 } else {
237 sctp_clog[sctp_cwnd_log_at].x.mb.ext = 0;
238 sctp_clog[sctp_cwnd_log_at].x.mb.refcnt = 0;
239 }
240}
241
242
243void
244sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
245 int from)
246{
247 int sctp_cwnd_log_at;
248
249 if (control == NULL) {
250 printf("Gak log of NULL?\n");
251 return;
252 }
253 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
254 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
255 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
256 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_STRM;
257 sctp_clog[sctp_cwnd_log_at].x.strlog.stcb = control->stcb;
258 sctp_clog[sctp_cwnd_log_at].x.strlog.n_tsn = control->sinfo_tsn;
259 sctp_clog[sctp_cwnd_log_at].x.strlog.n_sseq = control->sinfo_ssn;
260 sctp_clog[sctp_cwnd_log_at].x.strlog.strm = control->sinfo_stream;
261 if (poschk != NULL) {
262 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = poschk->sinfo_tsn;
263 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = poschk->sinfo_ssn;
264 } else {
265 sctp_clog[sctp_cwnd_log_at].x.strlog.e_tsn = 0;
266 sctp_clog[sctp_cwnd_log_at].x.strlog.e_sseq = 0;
267 }
268}
269
270void
271sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
272{
273 int sctp_cwnd_log_at;
274
275 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
276 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
277 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
278 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_CWND;
279 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
280 if (stcb->asoc.send_queue_cnt > 255)
281 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
282 else
283 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284 if (stcb->asoc.stream_queue_cnt > 255)
285 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
286 else
287 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288
289 if (net) {
290 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = net->cwnd;
291 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
292 sctp_clog[sctp_cwnd_log_at].x.cwnd.pseudo_cumack = net->pseudo_cumack;
293 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294 sctp_clog[sctp_cwnd_log_at].x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295 }
296 if (SCTP_CWNDLOG_PRESEND == from) {
297 sctp_clog[sctp_cwnd_log_at].x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298 }
299 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = augment;
300}
301
302void
303sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
304{
305 int sctp_cwnd_log_at;
306
307 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
308 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
309 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
310 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_LOCK_EVENT;
311 if (inp) {
312 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)inp->sctp_socket;
313
314 } else {
315 sctp_clog[sctp_cwnd_log_at].x.lock.sock = (void *)NULL;
316 }
317 sctp_clog[sctp_cwnd_log_at].x.lock.inp = (void *)inp;
318 if (stcb) {
319 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
320 } else {
321 sctp_clog[sctp_cwnd_log_at].x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
322 }
323 if (inp) {
324 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
325 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
326 } else {
327 sctp_clog[sctp_cwnd_log_at].x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
328 sctp_clog[sctp_cwnd_log_at].x.lock.create_lock = SCTP_LOCK_UNKNOWN;
329 }
330 sctp_clog[sctp_cwnd_log_at].x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx);
331 if (inp->sctp_socket) {
332 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
333 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
334 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
335 } else {
336 sctp_clog[sctp_cwnd_log_at].x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
337 sctp_clog[sctp_cwnd_log_at].x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
338 sctp_clog[sctp_cwnd_log_at].x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
339 }
340}
341
342void
343sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
344{
345 int sctp_cwnd_log_at;
346
347 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
348 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
349 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
350 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MAXBURST;
351 sctp_clog[sctp_cwnd_log_at].x.cwnd.net = net;
352 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_new_value = error;
353 sctp_clog[sctp_cwnd_log_at].x.cwnd.inflight = net->flight_size;
354 sctp_clog[sctp_cwnd_log_at].x.cwnd.cwnd_augment = burst;
355 if (stcb->asoc.send_queue_cnt > 255)
356 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = 255;
357 else
358 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
359 if (stcb->asoc.stream_queue_cnt > 255)
360 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = 255;
361 else
362 sctp_clog[sctp_cwnd_log_at].x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
363}
364
365void
366sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
367{
368 int sctp_cwnd_log_at;
369
370 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
371 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
372 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
373 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
374 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
375 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = snd_size;
376 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
377 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = 0;
378}
379
380void
381sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
382{
383 int sctp_cwnd_log_at;
384
385 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
386 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
387 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
388 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RWND;
389 sctp_clog[sctp_cwnd_log_at].x.rwnd.rwnd = peers_rwnd;
390 sctp_clog[sctp_cwnd_log_at].x.rwnd.send_size = flight_size;
391 sctp_clog[sctp_cwnd_log_at].x.rwnd.overhead = overhead;
392 sctp_clog[sctp_cwnd_log_at].x.rwnd.new_rwnd = a_rwndval;
393}
394
395void
396sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
397{
398 int sctp_cwnd_log_at;
399
400 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
401 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
402 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
403 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_MBCNT;
404 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_size = total_oq;
405 sctp_clog[sctp_cwnd_log_at].x.mbcnt.size_change = book;
406 sctp_clog[sctp_cwnd_log_at].x.mbcnt.total_queue_mb_size = total_mbcnt_q;
407 sctp_clog[sctp_cwnd_log_at].x.mbcnt.mbcnt_change = mbcnt;
408}
409
410void
411sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
412{
413 int sctp_cwnd_log_at;
414
415 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
416 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
417 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
418 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_MISC_EVENT;
419 sctp_clog[sctp_cwnd_log_at].x.misc.log1 = a;
420 sctp_clog[sctp_cwnd_log_at].x.misc.log2 = b;
421 sctp_clog[sctp_cwnd_log_at].x.misc.log3 = c;
422 sctp_clog[sctp_cwnd_log_at].x.misc.log4 = d;
423}
424
425void
426sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
427{
428 int sctp_cwnd_log_at;
429
430 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
431 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
432 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
433 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_WAKE;
434 sctp_clog[sctp_cwnd_log_at].x.wake.stcb = (void *)stcb;
435 sctp_clog[sctp_cwnd_log_at].x.wake.wake_cnt = wake_cnt;
436 sctp_clog[sctp_cwnd_log_at].x.wake.flight = stcb->asoc.total_flight_count;
437 sctp_clog[sctp_cwnd_log_at].x.wake.send_q = stcb->asoc.send_queue_cnt;
438 sctp_clog[sctp_cwnd_log_at].x.wake.sent_q = stcb->asoc.sent_queue_cnt;
439
440 if (stcb->asoc.stream_queue_cnt < 0xff)
441 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
442 else
443 sctp_clog[sctp_cwnd_log_at].x.wake.stream_qcnt = 0xff;
444
445 if (stcb->asoc.chunks_on_out_queue < 0xff)
446 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
447 else
448 sctp_clog[sctp_cwnd_log_at].x.wake.chunks_on_oque = 0xff;
449
450 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags = 0;
 451 /* set in the deferred mode stuff */
452 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
453 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 1;
454 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
455 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 2;
456 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
457 sctp_clog[sctp_cwnd_log_at].x.wake.sctpflags |= 4;
458 /* what about the sb */
459 if (stcb->sctp_socket) {
460 struct socket *so = stcb->sctp_socket;
461
462 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
463 } else {
464 sctp_clog[sctp_cwnd_log_at].x.wake.sbflags = 0xff;
465 }
466}
467
468void
469sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
470{
471 int sctp_cwnd_log_at;
472
473 SCTP_STATLOG_GETREF(sctp_cwnd_log_at);
474 sctp_clog[sctp_cwnd_log_at].from = (uint8_t) from;
475 sctp_clog[sctp_cwnd_log_at].time_event = sctp_get_time_of_event();
476 sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_BLOCK;
477 sctp_clog[sctp_cwnd_log_at].x.blk.onsb = asoc->total_output_queue_size;
478 sctp_clog[sctp_cwnd_log_at].x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
479 sctp_clog[sctp_cwnd_log_at].x.blk.peer_rwnd = asoc->peers_rwnd;
480 sctp_clog[sctp_cwnd_log_at].x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
481 sctp_clog[sctp_cwnd_log_at].x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
482 sctp_clog[sctp_cwnd_log_at].x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
483 sctp_clog[sctp_cwnd_log_at].x.blk.sndlen = sendlen;
484}
485
486int
492sctp_fill_stat_log(struct mbuf *m)
487sctp_fill_stat_log(void *optval, size_t *optsize)
493{
494 int sctp_cwnd_log_at;
495 struct sctp_cwnd_log_req *req;
496 size_t size_limit;
497 int num, i, at, cnt_out = 0;
498
488{
489 int sctp_cwnd_log_at;
490 struct sctp_cwnd_log_req *req;
491 size_t size_limit;
492 int num, i, at, cnt_out = 0;
493
499 if (m == NULL)
494 if (*optsize < sizeof(struct sctp_cwnd_log_req)) {
500 return (EINVAL);
495 return (EINVAL);
501
502 size_limit = (SCTP_BUF_LEN(m) - sizeof(struct sctp_cwnd_log_req));
496 }
497 size_limit = (*optsize - sizeof(struct sctp_cwnd_log_req));
503 if (size_limit < sizeof(struct sctp_cwnd_log)) {
504 return (EINVAL);
505 }
506 sctp_cwnd_log_at = global_sctp_cwnd_log_at;
498 if (size_limit < sizeof(struct sctp_cwnd_log)) {
499 return (EINVAL);
500 }
501 sctp_cwnd_log_at = global_sctp_cwnd_log_at;
507 req = mtod(m, struct sctp_cwnd_log_req *);
502 req = (struct sctp_cwnd_log_req *)optval;
508 num = size_limit / sizeof(struct sctp_cwnd_log);
509 if (global_sctp_cwnd_log_rolled) {
510 req->num_in_log = SCTP_STAT_LOG_SIZE;
511 } else {
512 req->num_in_log = sctp_cwnd_log_at;
513 /*
514 * if the log has not rolled, we don't let you have old
515 * data.
516 */
517 if (req->end_at > sctp_cwnd_log_at) {
518 req->end_at = sctp_cwnd_log_at;
519 }
520 }
521 if ((num < SCTP_STAT_LOG_SIZE) &&
522 ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
523 /* we can't return all of it */
524 if (((req->start_at == 0) && (req->end_at == 0)) ||
525 (req->start_at >= SCTP_STAT_LOG_SIZE) ||
526 (req->end_at >= SCTP_STAT_LOG_SIZE)) {
527 /* No user request or user is wacked. */
528 req->num_ret = num;
529 req->end_at = sctp_cwnd_log_at - 1;
530 if ((sctp_cwnd_log_at - num) < 0) {
531 int cc;
532
533 cc = num - sctp_cwnd_log_at;
534 req->start_at = SCTP_STAT_LOG_SIZE - cc;
535 } else {
536 req->start_at = sctp_cwnd_log_at - num;
537 }
538 } else {
539 /* a user request */
540 int cc;
541
542 if (req->start_at > req->end_at) {
543 cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
544 (req->end_at + 1);
545 } else {
546
547 cc = (req->end_at - req->start_at) + 1;
548 }
549 if (cc < num) {
550 num = cc;
551 }
552 req->num_ret = num;
553 }
554 } else {
555 /* We can return all of it */
556 req->start_at = 0;
557 req->end_at = sctp_cwnd_log_at - 1;
558 req->num_ret = sctp_cwnd_log_at;
559 }
560#ifdef INVARIANTS
561 if (req->num_ret > num) {
562 panic("Bad statlog get?");
563 }
564#endif
565 for (i = 0, at = req->start_at; i < req->num_ret; i++) {
566 req->log[i] = sctp_clog[at];
567 cnt_out++;
568 at++;
569 if (at >= SCTP_STAT_LOG_SIZE)
570 at = 0;
571 }
503 num = size_limit / sizeof(struct sctp_cwnd_log);
504 if (global_sctp_cwnd_log_rolled) {
505 req->num_in_log = SCTP_STAT_LOG_SIZE;
506 } else {
507 req->num_in_log = sctp_cwnd_log_at;
508 /*
509 * if the log has not rolled, we don't let you have old
510 * data.
511 */
512 if (req->end_at > sctp_cwnd_log_at) {
513 req->end_at = sctp_cwnd_log_at;
514 }
515 }
516 if ((num < SCTP_STAT_LOG_SIZE) &&
517 ((global_sctp_cwnd_log_rolled) || (sctp_cwnd_log_at > num))) {
518 /* we can't return all of it */
519 if (((req->start_at == 0) && (req->end_at == 0)) ||
520 (req->start_at >= SCTP_STAT_LOG_SIZE) ||
521 (req->end_at >= SCTP_STAT_LOG_SIZE)) {
522 /* No user request or user is wacked. */
523 req->num_ret = num;
524 req->end_at = sctp_cwnd_log_at - 1;
525 if ((sctp_cwnd_log_at - num) < 0) {
526 int cc;
527
528 cc = num - sctp_cwnd_log_at;
529 req->start_at = SCTP_STAT_LOG_SIZE - cc;
530 } else {
531 req->start_at = sctp_cwnd_log_at - num;
532 }
533 } else {
534 /* a user request */
535 int cc;
536
537 if (req->start_at > req->end_at) {
538 cc = (SCTP_STAT_LOG_SIZE - req->start_at) +
539 (req->end_at + 1);
540 } else {
541
542 cc = (req->end_at - req->start_at) + 1;
543 }
544 if (cc < num) {
545 num = cc;
546 }
547 req->num_ret = num;
548 }
549 } else {
550 /* We can return all of it */
551 req->start_at = 0;
552 req->end_at = sctp_cwnd_log_at - 1;
553 req->num_ret = sctp_cwnd_log_at;
554 }
555#ifdef INVARIANTS
556 if (req->num_ret > num) {
557 panic("Bad statlog get?");
558 }
559#endif
560 for (i = 0, at = req->start_at; i < req->num_ret; i++) {
561 req->log[i] = sctp_clog[at];
562 cnt_out++;
563 at++;
564 if (at >= SCTP_STAT_LOG_SIZE)
565 at = 0;
566 }
572 SCTP_BUF_LEN(m) = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
567 *optsize = (cnt_out * sizeof(struct sctp_cwnd_log)) + sizeof(struct sctp_cwnd_log_req);
573 return (0);
574}
575
576#endif
577
578#ifdef SCTP_AUDITING_ENABLED
579uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
580static int sctp_audit_indx = 0;
581
582static
583void
584sctp_print_audit_report(void)
585{
586 int i;
587 int cnt;
588
589 cnt = 0;
590 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
591 if ((sctp_audit_data[i][0] == 0xe0) &&
592 (sctp_audit_data[i][1] == 0x01)) {
593 cnt = 0;
594 printf("\n");
595 } else if (sctp_audit_data[i][0] == 0xf0) {
596 cnt = 0;
597 printf("\n");
598 } else if ((sctp_audit_data[i][0] == 0xc0) &&
599 (sctp_audit_data[i][1] == 0x01)) {
600 printf("\n");
601 cnt = 0;
602 }
603 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
604 (uint32_t) sctp_audit_data[i][1]);
605 cnt++;
606 if ((cnt % 14) == 0)
607 printf("\n");
608 }
609 for (i = 0; i < sctp_audit_indx; i++) {
610 if ((sctp_audit_data[i][0] == 0xe0) &&
611 (sctp_audit_data[i][1] == 0x01)) {
612 cnt = 0;
613 printf("\n");
614 } else if (sctp_audit_data[i][0] == 0xf0) {
615 cnt = 0;
616 printf("\n");
617 } else if ((sctp_audit_data[i][0] == 0xc0) &&
618 (sctp_audit_data[i][1] == 0x01)) {
619 printf("\n");
620 cnt = 0;
621 }
622 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
623 (uint32_t) sctp_audit_data[i][1]);
624 cnt++;
625 if ((cnt % 14) == 0)
626 printf("\n");
627 }
628 printf("\n");
629}
630
631void
632sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
633 struct sctp_nets *net)
634{
635 int resend_cnt, tot_out, rep, tot_book_cnt;
636 struct sctp_nets *lnet;
637 struct sctp_tmit_chunk *chk;
638
639 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
640 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
641 sctp_audit_indx++;
642 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 sctp_audit_indx = 0;
644 }
645 if (inp == NULL) {
646 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
647 sctp_audit_data[sctp_audit_indx][1] = 0x01;
648 sctp_audit_indx++;
649 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 sctp_audit_indx = 0;
651 }
652 return;
653 }
654 if (stcb == NULL) {
655 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 sctp_audit_data[sctp_audit_indx][1] = 0x02;
657 sctp_audit_indx++;
658 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 sctp_audit_indx = 0;
660 }
661 return;
662 }
663 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
664 sctp_audit_data[sctp_audit_indx][1] =
665 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
666 sctp_audit_indx++;
667 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 sctp_audit_indx = 0;
669 }
670 rep = 0;
671 tot_book_cnt = 0;
672 resend_cnt = tot_out = 0;
673 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
674 if (chk->sent == SCTP_DATAGRAM_RESEND) {
675 resend_cnt++;
676 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
677 tot_out += chk->book_size;
678 tot_book_cnt++;
679 }
680 }
681 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
682 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
684 sctp_audit_indx++;
685 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 sctp_audit_indx = 0;
687 }
688 printf("resend_cnt:%d asoc-tot:%d\n",
689 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
690 rep = 1;
691 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
692 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
693 sctp_audit_data[sctp_audit_indx][1] =
694 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
695 sctp_audit_indx++;
696 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
697 sctp_audit_indx = 0;
698 }
699 }
700 if (tot_out != stcb->asoc.total_flight) {
701 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
702 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
703 sctp_audit_indx++;
704 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
705 sctp_audit_indx = 0;
706 }
707 rep = 1;
708 printf("tot_flt:%d asoc_tot:%d\n", tot_out,
709 (int)stcb->asoc.total_flight);
710 stcb->asoc.total_flight = tot_out;
711 }
712 if (tot_book_cnt != stcb->asoc.total_flight_count) {
713 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
714 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
715 sctp_audit_indx++;
716 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 sctp_audit_indx = 0;
718 }
719 rep = 1;
 720 printf("tot_flt_book:%d\n", tot_book_cnt);
721
722 stcb->asoc.total_flight_count = tot_book_cnt;
723 }
724 tot_out = 0;
725 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
726 tot_out += lnet->flight_size;
727 }
728 if (tot_out != stcb->asoc.total_flight) {
729 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
730 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
731 sctp_audit_indx++;
732 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
733 sctp_audit_indx = 0;
734 }
735 rep = 1;
736 printf("real flight:%d net total was %d\n",
737 stcb->asoc.total_flight, tot_out);
738 /* now corrective action */
739 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
740
741 tot_out = 0;
742 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
743 if ((chk->whoTo == lnet) &&
744 (chk->sent < SCTP_DATAGRAM_RESEND)) {
745 tot_out += chk->book_size;
746 }
747 }
748 if (lnet->flight_size != tot_out) {
 749 printf("net:%p flight was %d corrected to %d\n",
 750 (void *)lnet, lnet->flight_size, tot_out);
751 lnet->flight_size = tot_out;
752 }
753 }
754 }
755 if (rep) {
756 sctp_print_audit_report();
757 }
758}
759
760void
761sctp_audit_log(uint8_t ev, uint8_t fd)
762{
763
764 sctp_audit_data[sctp_audit_indx][0] = ev;
765 sctp_audit_data[sctp_audit_indx][1] = fd;
766 sctp_audit_indx++;
767 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
768 sctp_audit_indx = 0;
769 }
770}
771
772#endif
773
774/*
775 * a list of sizes based on typical mtu's, used only if next hop size not
776 * returned.
777 */
778static int sctp_mtu_sizes[] = {
779 68,
780 296,
781 508,
782 512,
783 544,
784 576,
785 1006,
786 1492,
787 1500,
788 1536,
789 2002,
790 2048,
791 4352,
792 4464,
793 8166,
794 17914,
795 32000,
796 65535
797};
798
799void
800sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
801{
802 struct sctp_association *asoc;
803 struct sctp_nets *net;
804
805 asoc = &stcb->asoc;
806
807 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
808 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
809 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
810 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
811 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
812 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
813 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
814 SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
815 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
816 }
817}
818
819int
820find_next_best_mtu(int totsz)
821{
822 int i, perfer;
823
824 /*
825 * if we are in here we must find the next best fit based on the
826 * size of the dg that failed to be sent.
827 */
828 perfer = 0;
829 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
830 if (totsz < sctp_mtu_sizes[i]) {
831 perfer = i - 1;
832 if (perfer < 0)
833 perfer = 0;
834 break;
835 }
836 }
837 return (sctp_mtu_sizes[perfer]);
838}
839
840void
841sctp_fill_random_store(struct sctp_pcb *m)
842{
843 /*
844 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
845 * our counter. The result becomes our good random numbers and we
846 * then setup to give these out. Note that we do no locking to
847 * protect this. This is ok, since if competing folks call this we
 848 * will get more gobbled gook in the random store which is what we
849 * want. There is a danger that two guys will use the same random
 850 * numbers, but that's ok too since that is random as well :->
851 */
852 m->store_at = 0;
853 sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
854 sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
855 sizeof(m->random_counter), (uint8_t *) m->random_store);
856 m->random_counter++;
857}
858
859uint32_t
860sctp_select_initial_TSN(struct sctp_pcb *m)
861{
862 /*
863 * A true implementation should use random selection process to get
864 * the initial stream sequence number, using RFC1750 as a good
865 * guideline
866 */
867 uint32_t x, *xp;
868 uint8_t *p;
869
870 if (m->initial_sequence_debug != 0) {
871 uint32_t ret;
872
873 ret = m->initial_sequence_debug;
874 m->initial_sequence_debug++;
875 return (ret);
876 }
877 if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
878 /* Refill the random store */
879 sctp_fill_random_store(m);
880 }
881 p = &m->random_store[(int)m->store_at];
882 xp = (uint32_t *) p;
883 x = *xp;
884 m->store_at += sizeof(uint32_t);
885 return (x);
886}
887
888uint32_t
889sctp_select_a_tag(struct sctp_inpcb *m)
890{
891 u_long x, not_done;
892 struct timeval now;
893
894 SCTP_GETTIME_TIMEVAL(&now);
895 not_done = 1;
896 while (not_done) {
897 x = sctp_select_initial_TSN(&m->sctp_ep);
898 if (x == 0) {
899 /* we never use 0 */
900 continue;
901 }
902 if (sctp_is_vtag_good(m, x, &now)) {
903 not_done = 0;
904 }
905 }
906 return (x);
907}
908
909
910int
911sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
568 return (0);
569}
570
571#endif
572
573#ifdef SCTP_AUDITING_ENABLED
574uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
575static int sctp_audit_indx = 0;
576
577static
578void
579sctp_print_audit_report(void)
580{
581 int i;
582 int cnt;
583
584 cnt = 0;
585 for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
586 if ((sctp_audit_data[i][0] == 0xe0) &&
587 (sctp_audit_data[i][1] == 0x01)) {
588 cnt = 0;
589 printf("\n");
590 } else if (sctp_audit_data[i][0] == 0xf0) {
591 cnt = 0;
592 printf("\n");
593 } else if ((sctp_audit_data[i][0] == 0xc0) &&
594 (sctp_audit_data[i][1] == 0x01)) {
595 printf("\n");
596 cnt = 0;
597 }
598 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
599 (uint32_t) sctp_audit_data[i][1]);
600 cnt++;
601 if ((cnt % 14) == 0)
602 printf("\n");
603 }
604 for (i = 0; i < sctp_audit_indx; i++) {
605 if ((sctp_audit_data[i][0] == 0xe0) &&
606 (sctp_audit_data[i][1] == 0x01)) {
607 cnt = 0;
608 printf("\n");
609 } else if (sctp_audit_data[i][0] == 0xf0) {
610 cnt = 0;
611 printf("\n");
612 } else if ((sctp_audit_data[i][0] == 0xc0) &&
613 (sctp_audit_data[i][1] == 0x01)) {
614 printf("\n");
615 cnt = 0;
616 }
617 printf("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
618 (uint32_t) sctp_audit_data[i][1]);
619 cnt++;
620 if ((cnt % 14) == 0)
621 printf("\n");
622 }
623 printf("\n");
624}
625
626void
627sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
628 struct sctp_nets *net)
629{
630 int resend_cnt, tot_out, rep, tot_book_cnt;
631 struct sctp_nets *lnet;
632 struct sctp_tmit_chunk *chk;
633
634 sctp_audit_data[sctp_audit_indx][0] = 0xAA;
635 sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
636 sctp_audit_indx++;
637 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
638 sctp_audit_indx = 0;
639 }
640 if (inp == NULL) {
641 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
642 sctp_audit_data[sctp_audit_indx][1] = 0x01;
643 sctp_audit_indx++;
644 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 sctp_audit_indx = 0;
646 }
647 return;
648 }
649 if (stcb == NULL) {
650 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
651 sctp_audit_data[sctp_audit_indx][1] = 0x02;
652 sctp_audit_indx++;
653 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
654 sctp_audit_indx = 0;
655 }
656 return;
657 }
658 sctp_audit_data[sctp_audit_indx][0] = 0xA1;
659 sctp_audit_data[sctp_audit_indx][1] =
660 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
661 sctp_audit_indx++;
662 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
663 sctp_audit_indx = 0;
664 }
665 rep = 0;
666 tot_book_cnt = 0;
667 resend_cnt = tot_out = 0;
668 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
669 if (chk->sent == SCTP_DATAGRAM_RESEND) {
670 resend_cnt++;
671 } else if (chk->sent < SCTP_DATAGRAM_RESEND) {
672 tot_out += chk->book_size;
673 tot_book_cnt++;
674 }
675 }
676 if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
677 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
678 sctp_audit_data[sctp_audit_indx][1] = 0xA1;
679 sctp_audit_indx++;
680 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
681 sctp_audit_indx = 0;
682 }
683 printf("resend_cnt:%d asoc-tot:%d\n",
684 resend_cnt, stcb->asoc.sent_queue_retran_cnt);
685 rep = 1;
686 stcb->asoc.sent_queue_retran_cnt = resend_cnt;
687 sctp_audit_data[sctp_audit_indx][0] = 0xA2;
688 sctp_audit_data[sctp_audit_indx][1] =
689 (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
690 sctp_audit_indx++;
691 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
692 sctp_audit_indx = 0;
693 }
694 }
695 if (tot_out != stcb->asoc.total_flight) {
696 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
697 sctp_audit_data[sctp_audit_indx][1] = 0xA2;
698 sctp_audit_indx++;
699 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
700 sctp_audit_indx = 0;
701 }
702 rep = 1;
703 printf("tot_flt:%d asoc_tot:%d\n", tot_out,
704 (int)stcb->asoc.total_flight);
705 stcb->asoc.total_flight = tot_out;
706 }
707 if (tot_book_cnt != stcb->asoc.total_flight_count) {
708 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
709 sctp_audit_data[sctp_audit_indx][1] = 0xA5;
710 sctp_audit_indx++;
711 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
712 sctp_audit_indx = 0;
713 }
714 rep = 1;
715 		printf("tot_flt_book:%d\n", tot_book_cnt);
716
717 stcb->asoc.total_flight_count = tot_book_cnt;
718 }
719 tot_out = 0;
720 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
721 tot_out += lnet->flight_size;
722 }
723 if (tot_out != stcb->asoc.total_flight) {
724 sctp_audit_data[sctp_audit_indx][0] = 0xAF;
725 sctp_audit_data[sctp_audit_indx][1] = 0xA3;
726 sctp_audit_indx++;
727 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
728 sctp_audit_indx = 0;
729 }
730 rep = 1;
731 printf("real flight:%d net total was %d\n",
732 stcb->asoc.total_flight, tot_out);
733 /* now corrective action */
734 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
735
736 tot_out = 0;
737 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
738 if ((chk->whoTo == lnet) &&
739 (chk->sent < SCTP_DATAGRAM_RESEND)) {
740 tot_out += chk->book_size;
741 }
742 }
743 if (lnet->flight_size != tot_out) {
744 				printf("net:%p flight was %d corrected to %d\n",
745 				    lnet, lnet->flight_size, tot_out);
746 lnet->flight_size = tot_out;
747 }
748 }
749 }
750 if (rep) {
751 sctp_print_audit_report();
752 }
753}
754
755void
756sctp_audit_log(uint8_t ev, uint8_t fd)
757{
758
759 sctp_audit_data[sctp_audit_indx][0] = ev;
760 sctp_audit_data[sctp_audit_indx][1] = fd;
761 sctp_audit_indx++;
762 if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
763 sctp_audit_indx = 0;
764 }
765}
766
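/*
 * Editor's note: a minimal sketch of how this audit ring is meant to be
 * used, under a hypothetical SCTP_UTIL_EXAMPLES guard (not part of the
 * real build). sctp_audit_log() stores a two-byte event in
 * sctp_audit_data[] and wraps the index at SCTP_AUDIT_SIZE, so the ring
 * always holds the most recent events; sctp_print_audit_report() above
 * dumps them two hex digits per byte, starting a new line on the
 * 0xe0/0x01, 0xf0 and 0xc0/0x01 markers.
 */
#ifdef SCTP_UTIL_EXAMPLES	/* hypothetical guard, illustration only */
static void
example_audit_usage(void)
{
	sctp_audit_log(0xF0, 0x05);	/* e.g. "timer type 5 fired", as the timeout handler logs */
	sctp_audit_log(0xAA, 3);	/* sctp_auditing(3, ...) tags its entry with the "from" code */
	/* after SCTP_AUDIT_SIZE entries the index wraps and old entries are overwritten */
}
#endif
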
767#endif
768
769/*
770  * a list of sizes based on typical MTUs, used only if the next-hop size
771  * is not returned.
772 */
773static int sctp_mtu_sizes[] = {
774 68,
775 296,
776 508,
777 512,
778 544,
779 576,
780 1006,
781 1492,
782 1500,
783 1536,
784 2002,
785 2048,
786 4352,
787 4464,
788 8166,
789 17914,
790 32000,
791 65535
792};
793
794void
795sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
796{
797 struct sctp_association *asoc;
798 struct sctp_nets *net;
799
800 asoc = &stcb->asoc;
801
802 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
803 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
804 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
805 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
806 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
807 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
808 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
809 SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
810 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
811 }
812}
813
814int
815find_next_best_mtu(int totsz)
816{
817 int i, perfer;
818
819 /*
820 * if we are in here we must find the next best fit based on the
821 	 * size of the datagram that failed to be sent.
822 */
823 perfer = 0;
824 for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
825 if (totsz < sctp_mtu_sizes[i]) {
826 perfer = i - 1;
827 if (perfer < 0)
828 perfer = 0;
829 break;
830 }
831 }
832 return (sctp_mtu_sizes[perfer]);
833}
834
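/*
 * Editor's note: a worked example of the lookup above, as a sketch under
 * the hypothetical SCTP_UTIL_EXAMPLES guard (not part of the real build).
 * find_next_best_mtu() returns the table entry just below the first one
 * that exceeds totsz, so a datagram that failed at a given size is next
 * tried at the nearest smaller "typical" MTU.
 */
#ifdef SCTP_UTIL_EXAMPLES	/* hypothetical guard, illustration only */
static void
example_next_best_mtu(void)
{
	int mtu;

	mtu = find_next_best_mtu(1300);	/* first entry above 1300 is 1492, so 1006 is returned */
	mtu = find_next_best_mtu(1500);	/* 1500 < 1536, so the table entry 1500 itself comes back */
	mtu = find_next_best_mtu(70000);/* larger than every entry: perfer stays 0 and 68 is returned */
	(void)mtu;
}
#endif
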
835void
836sctp_fill_random_store(struct sctp_pcb *m)
837{
838 /*
839 	 * Here we use the MD5/SHA-1 to hash with our good random numbers and
840 	 * our counter. The result becomes our good random numbers and we
841 	 * then set up to give these out. Note that we do no locking to
842 	 * protect this. This is ok, since if competing folks call this we
843 	 * will get more gobbledygook in the random store, which is what we
844 	 * want. There is a danger that two callers will use the same random
845 	 * numbers, but that's ok too since that is random as well :->
846 */
847 m->store_at = 0;
848 sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
849 sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
850 sizeof(m->random_counter), (uint8_t *) m->random_store);
851 m->random_counter++;
852}
853
854uint32_t
855sctp_select_initial_TSN(struct sctp_pcb *m)
856{
857 /*
858 	 * A true implementation should use a random selection process to get
859 	 * the initial stream sequence number, using RFC 1750 as a good
860 * guideline
861 */
862 uint32_t x, *xp;
863 uint8_t *p;
864
865 if (m->initial_sequence_debug != 0) {
866 uint32_t ret;
867
868 ret = m->initial_sequence_debug;
869 m->initial_sequence_debug++;
870 return (ret);
871 }
872 if ((m->store_at + sizeof(u_long)) > SCTP_SIGNATURE_SIZE) {
873 /* Refill the random store */
874 sctp_fill_random_store(m);
875 }
876 p = &m->random_store[(int)m->store_at];
877 xp = (uint32_t *) p;
878 x = *xp;
879 m->store_at += sizeof(uint32_t);
880 return (x);
881}
882
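/*
 * Editor's note: a small usage sketch under the hypothetical
 * SCTP_UTIL_EXAMPLES guard (illustration only). Each call to
 * sctp_select_initial_TSN() hands out the next four bytes of
 * random_store and advances store_at; once fewer than sizeof(u_long)
 * bytes remain, sctp_fill_random_store() re-keys the store by hashing
 * random_numbers with the bumped counter.
 */
#ifdef SCTP_UTIL_EXAMPLES	/* hypothetical guard, illustration only */
static void
example_tsn_draw(struct sctp_pcb *ep)
{
	uint32_t t1, t2;

	t1 = sctp_select_initial_TSN(ep);	/* bytes [store_at .. store_at+3] of random_store */
	t2 = sctp_select_initial_TSN(ep);	/* the following four bytes (refilled when exhausted) */
	(void)t1;
	(void)t2;
}
#endif
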
883uint32_t
884sctp_select_a_tag(struct sctp_inpcb *m)
885{
886 u_long x, not_done;
887 struct timeval now;
888
889 SCTP_GETTIME_TIMEVAL(&now);
890 not_done = 1;
891 while (not_done) {
892 x = sctp_select_initial_TSN(&m->sctp_ep);
893 if (x == 0) {
894 /* we never use 0 */
895 continue;
896 }
897 if (sctp_is_vtag_good(m, x, &now)) {
898 not_done = 0;
899 }
900 }
901 return (x);
902}
903
904
905int
906sctp_init_asoc(struct sctp_inpcb *m, struct sctp_association *asoc,
912 int for_a_init, uint32_t override_tag)
907 int for_a_init, uint32_t override_tag, uint32_t vrf_id)
913{
914 /*
915 * Anything set to zero is taken care of by the allocation routine's
916 * bzero
917 */
918
919 /*
920 	 * Up front select what scoping to apply on addresses I tell my peer.
921 * Not sure what to do with these right now, we will need to come up
922 * with a way to set them. We may need to pass them through from the
923 * caller in the sctp_aloc_assoc() function.
924 */
925 int i;
926
927 /* init all variables to a known value. */
928 asoc->state = SCTP_STATE_INUSE;
929 asoc->max_burst = m->sctp_ep.max_burst;
930 asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
931 asoc->cookie_life = m->sctp_ep.def_cookie_life;
932 asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
933#ifdef AF_INET
928#ifdef INET
934 asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
935#else
936 asoc->default_tos = 0;
937#endif
938
939#ifdef AF_INET6
934#ifdef INET6
940 asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
941#else
942 asoc->default_flowlabel = 0;
943#endif
944 if (override_tag) {
945 struct timeval now;
946
947 SCTP_GETTIME_TIMEVAL(&now);
948 if (sctp_is_vtag_good(m, override_tag, &now)) {
949 asoc->my_vtag = override_tag;
950 } else {
951 return (ENOMEM);
952 }
953
954 } else {
955 asoc->my_vtag = sctp_select_a_tag(m);
956 }
957 /* Get the nonce tags */
958 asoc->my_vtag_nonce = sctp_select_a_tag(m);
959 asoc->peer_vtag_nonce = sctp_select_a_tag(m);
955 asoc->vrf_id = vrf_id;
960
961 if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
962 asoc->hb_is_disabled = 1;
963 else
964 asoc->hb_is_disabled = 0;
965
966 asoc->refcnt = 0;
967 asoc->assoc_up_sent = 0;
968 asoc->assoc_id = asoc->my_vtag;
969 asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
970 sctp_select_initial_TSN(&m->sctp_ep);
971 	/* we are optimistic here */
972 asoc->peer_supports_pktdrop = 1;
973
974 asoc->sent_queue_retran_cnt = 0;
975
976 /* for CMT */
977 asoc->last_net_data_came_from = NULL;
978
979 /* This will need to be adjusted */
980 asoc->last_cwr_tsn = asoc->init_seq_number - 1;
981 asoc->last_acked_seq = asoc->init_seq_number - 1;
982 asoc->advanced_peer_ack_point = asoc->last_acked_seq;
983 asoc->asconf_seq_in = asoc->last_acked_seq;
984
985 /* here we are different, we hold the next one we expect */
986 asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
987
988 asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
989 asoc->initial_rto = m->sctp_ep.initial_rto;
990
991 asoc->max_init_times = m->sctp_ep.max_init_times;
992 asoc->max_send_times = m->sctp_ep.max_send_times;
993 asoc->def_net_failure = m->sctp_ep.def_net_failure;
994 asoc->free_chunk_cnt = 0;
995
996 asoc->iam_blocking = 0;
997 /* ECN Nonce initialization */
998 asoc->context = m->sctp_context;
999 asoc->def_send = m->def_send;
1000 asoc->ecn_nonce_allowed = 0;
1001 asoc->receiver_nonce_sum = 1;
1002 asoc->nonce_sum_expect_base = 1;
1003 asoc->nonce_sum_check = 1;
1004 asoc->nonce_resync_tsn = 0;
1005 asoc->nonce_wait_for_ecne = 0;
1006 asoc->nonce_wait_tsn = 0;
1007 asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1004 asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1008 asoc->pr_sctp_cnt = 0;
1009 asoc->total_output_queue_size = 0;
1010
1011 if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1012 struct in6pcb *inp6;
1013
1014 		/* It's a V6 socket */
1015 inp6 = (struct in6pcb *)m;
1016 asoc->ipv6_addr_legal = 1;
1017 /* Now look at the binding flag to see if V4 will be legal */
1018 if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1019 asoc->ipv4_addr_legal = 1;
1020 } else {
1021 /* V4 addresses are NOT legal on the association */
1022 asoc->ipv4_addr_legal = 0;
1023 }
1024 } else {
1025 		/* It's a V4 socket, not V6 */
1026 asoc->ipv4_addr_legal = 1;
1027 asoc->ipv6_addr_legal = 0;
1028 }
1029
1030 asoc->my_rwnd = max(m->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND);
1031 asoc->peers_rwnd = m->sctp_socket->so_rcv.sb_hiwat;
1032
1033 asoc->smallest_mtu = m->sctp_frag_point;
1034 asoc->minrto = m->sctp_ep.sctp_minrto;
1035 asoc->maxrto = m->sctp_ep.sctp_maxrto;
1036
1037 asoc->locked_on_sending = NULL;
1038 asoc->stream_locked_on = 0;
1039 asoc->ecn_echo_cnt_onq = 0;
1040 asoc->stream_locked = 0;
1041
1042 LIST_INIT(&asoc->sctp_local_addr_list);
1039 asoc->send_sack = 1;
1040
1041 LIST_INIT(&asoc->sctp_restricted_addrs);
1042
1043 TAILQ_INIT(&asoc->nets);
1044 TAILQ_INIT(&asoc->pending_reply_queue);
1045 asoc->last_asconf_ack_sent = NULL;
1046 /* Setup to fill the hb random cache at first HB */
1047 asoc->hb_random_idx = 4;
1048
1049 asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1050
1051 /*
1052 * Now the stream parameters, here we allocate space for all streams
1053 * that we request by default.
1054 */
1055 asoc->streamoutcnt = asoc->pre_open_streams =
1056 m->sctp_ep.pre_open_stream_count;
1057 SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1058 asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1059 "StreamsOut");
1060 if (asoc->strmout == NULL) {
1061 /* big trouble no memory */
1062 return (ENOMEM);
1063 }
1064 for (i = 0; i < asoc->streamoutcnt; i++) {
1065 /*
1066 		 * The inbound side must be set to 0xffff. Also NOTE: when we get
1067 		 * the INIT-ACK back (for the INIT sender) we MUST reduce the
1068 		 * count (streamoutcnt), but first check if we sent to any of
1069 		 * the upper streams that were dropped (if some were). Those
1070 		 * that were dropped must be notified to the upper layer as
1071 		 * failed to send.
1072 */
1073 asoc->strmout[i].next_sequence_sent = 0x0;
1074 TAILQ_INIT(&asoc->strmout[i].outqueue);
1075 asoc->strmout[i].stream_no = i;
1076 asoc->strmout[i].last_msg_incomplete = 0;
1077 asoc->strmout[i].next_spoke.tqe_next = 0;
1078 asoc->strmout[i].next_spoke.tqe_prev = 0;
1079 }
1080 /* Now the mapping array */
1081 asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1082 SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1083 "MappingArray");
1084 if (asoc->mapping_array == NULL) {
1085 SCTP_FREE(asoc->strmout);
1086 return (ENOMEM);
1087 }
1088 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1089 /* Now the init of the other outqueues */
1090 TAILQ_INIT(&asoc->free_chunks);
1091 TAILQ_INIT(&asoc->free_strmoq);
1092 TAILQ_INIT(&asoc->out_wheel);
1093 TAILQ_INIT(&asoc->control_send_queue);
1094 TAILQ_INIT(&asoc->send_queue);
1095 TAILQ_INIT(&asoc->sent_queue);
1096 TAILQ_INIT(&asoc->reasmqueue);
1097 TAILQ_INIT(&asoc->resetHead);
1098 asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1099 TAILQ_INIT(&asoc->asconf_queue);
1100 /* authentication fields */
1101 asoc->authinfo.random = NULL;
1102 asoc->authinfo.assoc_key = NULL;
1103 asoc->authinfo.assoc_keyid = 0;
1104 asoc->authinfo.recv_key = NULL;
1105 asoc->authinfo.recv_keyid = 0;
1106 LIST_INIT(&asoc->shared_keys);
1107 asoc->marked_retrans = 0;
1108 asoc->timoinit = 0;
1109 asoc->timodata = 0;
1110 asoc->timosack = 0;
1111 asoc->timoshutdown = 0;
1112 asoc->timoheartbeat = 0;
1113 asoc->timocookie = 0;
1114 asoc->timoshutdownack = 0;
1115 SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1116 SCTP_GETTIME_TIMEVAL(&asoc->discontinuity_time);
1117
1118 return (0);
1119}
1120
1121int
1122sctp_expand_mapping_array(struct sctp_association *asoc)
1123{
1124 /* mapping array needs to grow */
1125 uint8_t *new_array;
1126 uint16_t new_size;
1127
1128 new_size = asoc->mapping_array_size + SCTP_MAPPING_ARRAY_INCR;
1129 SCTP_MALLOC(new_array, uint8_t *, new_size, "MappingArray");
1130 if (new_array == NULL) {
1131 /* can't get more, forget it */
1132 printf("No memory for expansion of SCTP mapping array %d\n",
1133 new_size);
1134 return (-1);
1135 }
1136 memset(new_array, 0, new_size);
1137 memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1138 SCTP_FREE(asoc->mapping_array);
1139 asoc->mapping_array = new_array;
1140 asoc->mapping_array_size = new_size;
1141 return (0);
1142}
1143
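/*
 * Editor's note: a sketch of how callers grow the mapping array (the
 * loop and the "needed" argument are illustrative, under the
 * hypothetical SCTP_UTIL_EXAMPLES guard). Each successful call adds
 * SCTP_MAPPING_ARRAY_INCR bytes, copies the old bitmap across and
 * zero-fills the new tail.
 */
#ifdef SCTP_UTIL_EXAMPLES	/* hypothetical guard, illustration only */
static int
example_grow_mapping(struct sctp_association *asoc, uint32_t needed)
{
	while (asoc->mapping_array_size < needed) {
		if (sctp_expand_mapping_array(asoc))
			return (-1);	/* allocation failed; the caller must back off */
	}
	return (0);
}
#endif
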
1144extern unsigned int sctp_early_fr_msec;
1144#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
1145static void
1146sctp_iterator_work(struct sctp_iterator *it)
1147{
1148 int iteration_count = 0;
1149 int inp_skip = 0;
1145
1150
1151 SCTP_ITERATOR_LOCK();
1152 if (it->inp)
1153 SCTP_INP_DECR_REF(it->inp);
1154
1155 if (it->inp == NULL) {
1156 /* iterator is complete */
1157done_with_iterator:
1158 SCTP_ITERATOR_UNLOCK();
1159 if (it->function_atend != NULL) {
1160 (*it->function_atend) (it->pointer, it->val);
1161 }
1162 SCTP_FREE(it);
1163 return;
1164 }
1165select_a_new_ep:
1166 SCTP_INP_WLOCK(it->inp);
1167 while (((it->pcb_flags) &&
1168 ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1169 ((it->pcb_features) &&
1170 ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1171 /* endpoint flags or features don't match, so keep looking */
1172 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1173 SCTP_INP_WUNLOCK(it->inp);
1174 goto done_with_iterator;
1175 }
1176 SCTP_INP_WUNLOCK(it->inp);
1177 it->inp = LIST_NEXT(it->inp, sctp_list);
1178 if (it->inp == NULL) {
1179 goto done_with_iterator;
1180 }
1181 SCTP_INP_WLOCK(it->inp);
1182 }
1183
1184 /* mark the current iterator on the endpoint */
1185 it->inp->inp_starting_point_for_iterator = it;
1186 SCTP_INP_WUNLOCK(it->inp);
1187 SCTP_INP_RLOCK(it->inp);
1188
1189 /* now go through each assoc which is in the desired state */
1190 if (it->done_current_ep == 0) {
1191 if (it->function_inp != NULL)
1192 inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1193 it->done_current_ep = 1;
1194 }
1195 if (it->stcb == NULL) {
1196 /* run the per instance function */
1197 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1198 }
1199 if ((inp_skip) || it->stcb == NULL) {
1200 if (it->function_inp_end != NULL) {
1201 inp_skip = (*it->function_inp_end) (it->inp,
1202 it->pointer,
1203 it->val);
1204 }
1205 SCTP_INP_RUNLOCK(it->inp);
1206 goto no_stcb;
1207 }
1208 if ((it->stcb) &&
1209 (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
1210 it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1211 }
1212 while (it->stcb) {
1213 SCTP_TCB_LOCK(it->stcb);
1214 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1215 /* not in the right state... keep looking */
1216 SCTP_TCB_UNLOCK(it->stcb);
1217 goto next_assoc;
1218 }
1219 /* mark the current iterator on the assoc */
1220 it->stcb->asoc.stcb_starting_point_for_iterator = it;
1221 /* see if we have limited out the iterator loop */
1222 iteration_count++;
1223 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1224 /* Pause to let others grab the lock */
1225 atomic_add_int(&it->stcb->asoc.refcnt, 1);
1226 SCTP_TCB_UNLOCK(it->stcb);
1227 SCTP_INP_RUNLOCK(it->inp);
1228 SCTP_ITERATOR_UNLOCK();
1229 SCTP_ITERATOR_LOCK();
1230 SCTP_INP_RLOCK(it->inp);
1231 SCTP_TCB_LOCK(it->stcb);
1232 atomic_add_int(&it->stcb->asoc.refcnt, -1);
1233 iteration_count = 0;
1234 }
1235 /* run function on this one */
1236 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1237
1238 /*
1239 * we lie here, it really needs to have its own type but
1240 		 * first I must verify that this won't affect things :-0
1241 */
1242 if (it->no_chunk_output == 0)
1243 sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);
1244
1245 SCTP_TCB_UNLOCK(it->stcb);
1246next_assoc:
1247 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1248 if (it->stcb == NULL) {
1249 /* Run last function */
1250 if (it->function_inp_end != NULL) {
1251 inp_skip = (*it->function_inp_end) (it->inp,
1252 it->pointer,
1253 it->val);
1254 }
1255 }
1256 }
1257 SCTP_INP_RUNLOCK(it->inp);
1258no_stcb:
1259 /* done with all assocs on this endpoint, move on to next endpoint */
1260 it->done_current_ep = 0;
1261 SCTP_INP_WLOCK(it->inp);
1262 it->inp->inp_starting_point_for_iterator = NULL;
1263 SCTP_INP_WUNLOCK(it->inp);
1264 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1265 it->inp = NULL;
1266 } else {
1267 SCTP_INP_INFO_RLOCK();
1268 it->inp = LIST_NEXT(it->inp, sctp_list);
1269 SCTP_INP_INFO_RUNLOCK();
1270 }
1271 if (it->inp == NULL) {
1272 goto done_with_iterator;
1273 }
1274 goto select_a_new_ep;
1275}
1276
1277void
1278sctp_iterator_worker(void)
1279{
1280 struct sctp_iterator *it = NULL;
1281
1282 /* This function is called with the WQ lock in place */
1283
1284 sctppcbinfo.iterator_running = 1;
1285again:
1286 it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1287 while (it) {
1288 /* now lets work on this one */
1289 TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1290 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1291 sctp_iterator_work(it);
1292 SCTP_IPI_ITERATOR_WQ_LOCK();
1293 it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1294 }
1295 if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
1296 goto again;
1297 }
1298 sctppcbinfo.iterator_running = 0;
1299 return;
1300}
1301
1302#endif
1303
1304
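/*
 * Editor's note: the worker above drains a work queue while repeatedly
 * dropping the queue lock around each (possibly long-running) item.
 * Below is that pattern reduced to its essentials as a sketch, under the
 * hypothetical SCTP_UTIL_EXAMPLES guard; the example_* names are not
 * SCTP API, and <sys/queue.h> is assumed to be pulled in by the SCTP
 * headers.
 */
#ifdef SCTP_UTIL_EXAMPLES	/* hypothetical guard, illustration only */
struct example_item {
	TAILQ_ENTRY(example_item) link;
};
TAILQ_HEAD(example_item_head, example_item);

static void
example_drain(struct example_item_head *head)
{
	struct example_item *item;

	/* queue lock held on entry, like the iterator WQ lock */
	while ((item = TAILQ_FIRST(head)) != NULL) {
		TAILQ_REMOVE(head, item, link);
		/* unlock, run the item's work, then re-lock before the next pass */
	}
}
#endif
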
1146static void
1147sctp_handle_addr_wq(void)
1148{
1149 /* deal with the ADDR wq from the rtsock calls */
1150 struct sctp_laddr *wi;
1305static void
1306sctp_handle_addr_wq(void)
1307{
1308 /* deal with the ADDR wq from the rtsock calls */
1309 struct sctp_laddr *wi;
1310 struct sctp_asconf_iterator *asc;
1151
1311
1152 SCTP_IPI_ADDR_LOCK();
1153 wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1154 if (wi == NULL) {
1155 SCTP_IPI_ADDR_UNLOCK();
1156 return;
1157 }
1158 LIST_REMOVE(wi, sctp_nxt_addr);
1159 if (!SCTP_LIST_EMPTY(&sctppcbinfo.addr_wq)) {
1312 SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1313 sizeof(struct sctp_asconf_iterator), "SCTP_ASCONF_ITERATOR");
1314 if (asc == NULL) {
1315 /* Try later, no memory */
1160 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1161 (struct sctp_inpcb *)NULL,
1162 (struct sctp_tcb *)NULL,
1163 (struct sctp_nets *)NULL);
1316 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1317 (struct sctp_inpcb *)NULL,
1318 (struct sctp_tcb *)NULL,
1319 (struct sctp_nets *)NULL);
1320 return;
1164 }
1321 }
1165 SCTP_IPI_ADDR_UNLOCK();
1166 if (wi->action == RTM_ADD) {
1167 sctp_add_ip_address(wi->ifa);
1168 } else if (wi->action == RTM_DELETE) {
1169 sctp_delete_ip_address(wi->ifa);
1322 LIST_INIT(&asc->list_of_work);
1323 asc->cnt = 0;
1324 SCTP_IPI_ITERATOR_WQ_LOCK();
1325 wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1326 while (wi != NULL) {
1327 LIST_REMOVE(wi, sctp_nxt_addr);
1328 LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1329 asc->cnt++;
1330 wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1170 }
1331 }
1171 IFAFREE(wi->ifa);
1172 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, wi);
1173 SCTP_DECR_LADDR_COUNT();
1332 SCTP_IPI_ITERATOR_WQ_UNLOCK();
1333 if (asc->cnt == 0) {
1334 SCTP_FREE(asc);
1335 } else {
1336 sctp_initiate_iterator(sctp_iterator_ep,
1337 sctp_iterator_stcb,
1338 NULL, /* No ep end for boundall */
1339 SCTP_PCB_FLAGS_BOUNDALL,
1340 SCTP_PCB_ANY_FEATURES,
1341 SCTP_ASOC_ANY_STATE, (void *)asc, 0,
1342 sctp_iterator_end, NULL, 0);
1343 }
1344
1174}
1175
1176void
1177sctp_timeout_handler(void *t)
1178{
1179 struct sctp_inpcb *inp;
1180 struct sctp_tcb *stcb;
1181 struct sctp_nets *net;
1182 struct sctp_timer *tmr;
1183 int did_output;
1184 struct sctp_iterator *it = NULL;
1185
1186
1187 tmr = (struct sctp_timer *)t;
1188 inp = (struct sctp_inpcb *)tmr->ep;
1189 stcb = (struct sctp_tcb *)tmr->tcb;
1190 net = (struct sctp_nets *)tmr->net;
1191 did_output = 1;
1192
1193#ifdef SCTP_AUDITING_ENABLED
1194 sctp_audit_log(0xF0, (uint8_t) tmr->type);
1195 sctp_auditing(3, inp, stcb, net);
1196#endif
1197
1198 /* sanity checks... */
1199 if (tmr->self != (void *)tmr) {
1200 /*
1201 * printf("Stale SCTP timer fired (%p), ignoring...\n",
1202 * tmr);
1203 */
1204 return;
1205 }
1206 tmr->stopped_from = 0xa001;
1207 if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1208 /*
1209 * printf("SCTP timer fired with invalid type: 0x%x\n",
1210 * tmr->type);
1211 */
1212 return;
1213 }
1214 tmr->stopped_from = 0xa002;
1215 if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1216 return;
1217 }
1218 /* if this is an iterator timeout, get the struct and clear inp */
1219 tmr->stopped_from = 0xa003;
1220 if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
1221 it = (struct sctp_iterator *)inp;
1222 inp = NULL;
1223 }
1224 if (inp) {
1225 SCTP_INP_INCR_REF(inp);
1226 if ((inp->sctp_socket == 0) &&
1227 ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1228 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1229 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1230 (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1231 (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1232 ) {
1233 SCTP_INP_DECR_REF(inp);
1234 return;
1235 }
1236 }
1237 tmr->stopped_from = 0xa004;
1238 if (stcb) {
1239 if (stcb->asoc.state == 0) {
1240 if (inp) {
1241 SCTP_INP_DECR_REF(inp);
1242 }
1243 return;
1244 }
1245 }
1246 tmr->stopped_from = 0xa005;
1247#ifdef SCTP_DEBUG
1248 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1249 printf("Timer type %d goes off\n", tmr->type);
1250 }
1251#endif /* SCTP_DEBUG */
1252 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1253 if (inp) {
1254 SCTP_INP_DECR_REF(inp);
1255 }
1256 return;
1257 }
1258 tmr->stopped_from = 0xa006;
1259
1260 if (stcb) {
1261 atomic_add_int(&stcb->asoc.refcnt, 1);
1262 SCTP_TCB_LOCK(stcb);
1263 atomic_add_int(&stcb->asoc.refcnt, -1);
1264 }
1265 	/* record in stopped_from which timeout occurred */
1266 tmr->stopped_from = tmr->type;
1267
1268 /* mark as being serviced now */
1269 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1270 /*
1271 * Callout has been rescheduled.
1272 */
1273 goto get_out;
1274 }
1275 if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1276 /*
1277 * Not active, so no action.
1278 */
1279 goto get_out;
1280 }
1281 SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1282
1283 /* call the handler for the appropriate timer type */
1284 switch (tmr->type) {
1285 case SCTP_TIMER_TYPE_ADDR_WQ:
1286 sctp_handle_addr_wq();
1287 break;
1288 case SCTP_TIMER_TYPE_ITERATOR:
1289 SCTP_STAT_INCR(sctps_timoiterator);
1290 sctp_iterator_timer(it);
1291 break;
1292 case SCTP_TIMER_TYPE_SEND:
1293 SCTP_STAT_INCR(sctps_timodata);
1294 stcb->asoc.timodata++;
1295 stcb->asoc.num_send_timers_up--;
1296 if (stcb->asoc.num_send_timers_up < 0) {
1297 stcb->asoc.num_send_timers_up = 0;
1298 }
1299 if (sctp_t3rxt_timer(inp, stcb, net)) {
1300 /* no need to unlock on tcb its gone */
1301
1302 goto out_decr;
1303 }
1304#ifdef SCTP_AUDITING_ENABLED
1305 sctp_auditing(4, inp, stcb, net);
1306#endif
1307 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
1308 if ((stcb->asoc.num_send_timers_up == 0) &&
1309 (stcb->asoc.sent_queue_cnt > 0)
1310 ) {
1311 struct sctp_tmit_chunk *chk;
1312
1313 /*
1314 			 * safeguard. If there are chunks on the sent queue
1315 			 * but no timers running, something is
1316 			 * wrong... so we start a timer on the first chunk
1317 			 * on the sent queue on whatever net it is sent to.
1318 */
1319 chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1320 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1321 chk->whoTo);
1322 }
1323 break;
1324 case SCTP_TIMER_TYPE_INIT:
1325 SCTP_STAT_INCR(sctps_timoinit);
1326 stcb->asoc.timoinit++;
1327 if (sctp_t1init_timer(inp, stcb, net)) {
1328 /* no need to unlock on tcb its gone */
1329 goto out_decr;
1330 }
1331 /* We do output but not here */
1332 did_output = 0;
1333 break;
1334 case SCTP_TIMER_TYPE_RECV:
1335 SCTP_STAT_INCR(sctps_timosack);
1336 stcb->asoc.timosack++;
1337 sctp_send_sack(stcb);
1338#ifdef SCTP_AUDITING_ENABLED
1339 sctp_auditing(4, inp, stcb, net);
1340#endif
1341 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR);
1342 break;
1343 case SCTP_TIMER_TYPE_SHUTDOWN:
1344 if (sctp_shutdown_timer(inp, stcb, net)) {
1345 /* no need to unlock on tcb its gone */
1346 goto out_decr;
1347 }
1348 SCTP_STAT_INCR(sctps_timoshutdown);
1349 stcb->asoc.timoshutdown++;
1350#ifdef SCTP_AUDITING_ENABLED
1351 sctp_auditing(4, inp, stcb, net);
1352#endif
1353 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR);
1354 break;
1355 case SCTP_TIMER_TYPE_HEARTBEAT:
1356 {
1357 struct sctp_nets *net;
1358 int cnt_of_unconf = 0;
1359
1360 SCTP_STAT_INCR(sctps_timoheartbeat);
1361 stcb->asoc.timoheartbeat++;
1362 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1363 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1364 (net->dest_state & SCTP_ADDR_REACHABLE)) {
1365 cnt_of_unconf++;
1366 }
1367 }
1368 if (cnt_of_unconf == 0) {
1369 if (sctp_heartbeat_timer(inp, stcb, net, cnt_of_unconf)) {
1370 /* no need to unlock on tcb its gone */
1371 goto out_decr;
1372 }
1373 }
1374#ifdef SCTP_AUDITING_ENABLED
1375 sctp_auditing(4, inp, stcb, net);
1376#endif
1377 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
1378 stcb, net);
1379 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR);
1380 }
1381 break;
1382 case SCTP_TIMER_TYPE_COOKIE:
1383 if (sctp_cookie_timer(inp, stcb, net)) {
1384 /* no need to unlock on tcb its gone */
1385 goto out_decr;
1386 }
1387 SCTP_STAT_INCR(sctps_timocookie);
1388 stcb->asoc.timocookie++;
1389#ifdef SCTP_AUDITING_ENABLED
1390 sctp_auditing(4, inp, stcb, net);
1391#endif
1392 /*
1393 * We consider T3 and Cookie timer pretty much the same with
1394 * respect to where from in chunk_output.
1395 */
1396 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
1397 break;
1398 case SCTP_TIMER_TYPE_NEWCOOKIE:
1399 {
1400 struct timeval tv;
1401 int i, secret;
1402
1403 SCTP_STAT_INCR(sctps_timosecret);
1404 SCTP_GETTIME_TIMEVAL(&tv);
1405 SCTP_INP_WLOCK(inp);
1406 inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1407 inp->sctp_ep.last_secret_number =
1408 inp->sctp_ep.current_secret_number;
1409 inp->sctp_ep.current_secret_number++;
1410 if (inp->sctp_ep.current_secret_number >=
1411 SCTP_HOW_MANY_SECRETS) {
1412 inp->sctp_ep.current_secret_number = 0;
1413 }
1414 secret = (int)inp->sctp_ep.current_secret_number;
1415 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1416 inp->sctp_ep.secret_key[secret][i] =
1417 sctp_select_initial_TSN(&inp->sctp_ep);
1418 }
1419 SCTP_INP_WUNLOCK(inp);
1420 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1421 }
1422 did_output = 0;
1423 break;
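		/*
		 * Editor's note: in other words, the endpoint keeps
		 * SCTP_HOW_MANY_SECRETS cookie secrets in rotation; each
		 * expiry remembers the previous slot in last_secret_number,
		 * advances current_secret_number (wrapping to 0) and refills
		 * the new slot with SCTP_NUMBER_OF_SECRETS fresh random
		 * words, presumably so cookies minted under the previous
		 * secret still verify for one more rotation period.
		 */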
1424 case SCTP_TIMER_TYPE_PATHMTURAISE:
1425 SCTP_STAT_INCR(sctps_timopathmtu);
1426 sctp_pathmtu_timer(inp, stcb, net);
1427 did_output = 0;
1428 break;
1429 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1430 if (sctp_shutdownack_timer(inp, stcb, net)) {
1431 /* no need to unlock on tcb its gone */
1432 goto out_decr;
1433 }
1434 SCTP_STAT_INCR(sctps_timoshutdownack);
1435 stcb->asoc.timoshutdownack++;
1436#ifdef SCTP_AUDITING_ENABLED
1437 sctp_auditing(4, inp, stcb, net);
1438#endif
1439 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR);
1440 break;
1441 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1442 SCTP_STAT_INCR(sctps_timoshutdownguard);
1443 sctp_abort_an_association(inp, stcb,
1444 SCTP_SHUTDOWN_GUARD_EXPIRES, NULL);
1445 /* no need to unlock on tcb its gone */
1446 goto out_decr;
1447 break;
1448
1449 case SCTP_TIMER_TYPE_STRRESET:
1450 if (sctp_strreset_timer(inp, stcb, net)) {
1451 /* no need to unlock on tcb its gone */
1452 goto out_decr;
1453 }
1454 SCTP_STAT_INCR(sctps_timostrmrst);
1455 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR);
1456 break;
1457 case SCTP_TIMER_TYPE_EARLYFR:
1458 /* Need to do FR of things for net */
1459 SCTP_STAT_INCR(sctps_timoearlyfr);
1460 sctp_early_fr_timer(inp, stcb, net);
1461 break;
1462 case SCTP_TIMER_TYPE_ASCONF:
1463 if (sctp_asconf_timer(inp, stcb, net)) {
1464 /* no need to unlock on tcb its gone */
1465 goto out_decr;
1466 }
1467 SCTP_STAT_INCR(sctps_timoasconf);
1468#ifdef SCTP_AUDITING_ENABLED
1469 sctp_auditing(4, inp, stcb, net);
1470#endif
1471 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR);
1472 break;
1473
1474 case SCTP_TIMER_TYPE_AUTOCLOSE:
1475 SCTP_STAT_INCR(sctps_timoautoclose);
1476 sctp_autoclose_timer(inp, stcb, net);
1477 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
1478 did_output = 0;
1479 break;
1480 case SCTP_TIMER_TYPE_ASOCKILL:
1481 SCTP_STAT_INCR(sctps_timoassockill);
1482 /* Can we free it yet? */
1483 SCTP_INP_DECR_REF(inp);
1484 sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1485 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1486 /*
1487 		 * free asoc, always unlocks (or destroys) so prevent
1488 * duplicate unlock or unlock of a free mtx :-0
1489 */
1490 stcb = NULL;
1491 goto out_no_decr;
1492 break;
1493 case SCTP_TIMER_TYPE_INPKILL:
1494 SCTP_STAT_INCR(sctps_timoinpkill);
1495 /*
1496 * special case, take away our increment since WE are the
1497 * killer
1498 */
1499 SCTP_INP_DECR_REF(inp);
1500 sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1501 sctp_inpcb_free(inp, 1, 0);
1502 goto out_no_decr;
1503 break;
1504 default:
1505#ifdef SCTP_DEBUG
1506 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1507 printf("sctp_timeout_handler:unknown timer %d\n",
1508 tmr->type);
1509 }
1510#endif /* SCTP_DEBUG */
1511 break;
1512 };
1513#ifdef SCTP_AUDITING_ENABLED
1514 sctp_audit_log(0xF1, (uint8_t) tmr->type);
1515 if (inp)
1516 sctp_auditing(5, inp, stcb, net);
1517#endif
1518 if ((did_output) && stcb) {
1519 /*
1520 * Now we need to clean up the control chunk chain if an
1521 * ECNE is on it. It must be marked as UNSENT again so next
1522 * call will continue to send it until such time that we get
1523 * a CWR, to remove it. It is, however, less likely that we
1524 		 * will find an ECN echo on the chain, though.
1525 */
1526 sctp_fix_ecn_echo(&stcb->asoc);
1527 }
1528get_out:
1529 if (stcb) {
1530 SCTP_TCB_UNLOCK(stcb);
1531 }
1532out_decr:
1533 if (inp) {
1534 SCTP_INP_DECR_REF(inp);
1535 }
1536out_no_decr:
1537
1538#ifdef SCTP_DEBUG
1539 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1540 printf("Timer now complete (type %d)\n", tmr->type);
1541 }
1542#endif /* SCTP_DEBUG */
1543 if (inp) {
1544 }
1545}
1546
1547int
1548sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1549 struct sctp_nets *net)
1550{
1551 int to_ticks;
1552 struct sctp_timer *tmr;
1553
1554 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1555 return (EFAULT);
1556
1557 to_ticks = 0;
1558
1559 tmr = NULL;
1560 if (stcb) {
1561 SCTP_TCB_LOCK_ASSERT(stcb);
1562 }
1563 switch (t_type) {
1564 case SCTP_TIMER_TYPE_ADDR_WQ:
1565 /* Only 1 tick away :-) */
1566 tmr = &sctppcbinfo.addr_wq_timer;
1567 to_ticks = 1;
1738 to_ticks = SCTP_ADDRESS_TICK_DELAY;
1568 break;
1569 case SCTP_TIMER_TYPE_ITERATOR:
1570 {
1571 struct sctp_iterator *it;
1572
1573 it = (struct sctp_iterator *)inp;
1574 tmr = &it->tmr;
1575 to_ticks = SCTP_ITERATOR_TICKS;
1576 }
1577 break;
1578 case SCTP_TIMER_TYPE_SEND:
1579 /* Here we use the RTO timer */
1580 {
1581 int rto_val;
1582
1583 if ((stcb == NULL) || (net == NULL)) {
1584 return (EFAULT);
1585 }
1586 tmr = &net->rxt_timer;
1587 if (net->RTO == 0) {
1588 rto_val = stcb->asoc.initial_rto;
1589 } else {
1590 rto_val = net->RTO;
1591 }
1592 to_ticks = MSEC_TO_TICKS(rto_val);
1593 }
1594 break;
1595 case SCTP_TIMER_TYPE_INIT:
1596 /*
1597 * Here we use the INIT timer default usually about 1
1598 * minute.
1599 */
1600 if ((stcb == NULL) || (net == NULL)) {
1601 return (EFAULT);
1602 }
1603 tmr = &net->rxt_timer;
1604 if (net->RTO == 0) {
1605 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1606 } else {
1607 to_ticks = MSEC_TO_TICKS(net->RTO);
1608 }
1609 break;
1610 case SCTP_TIMER_TYPE_RECV:
1611 /*
1612 * Here we use the Delayed-Ack timer value from the inp
1613 		 * usually about 200 ms.
1614 */
1615 if (stcb == NULL) {
1616 return (EFAULT);
1617 }
1618 tmr = &stcb->asoc.dack_timer;
1619 to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1620 break;
1621 case SCTP_TIMER_TYPE_SHUTDOWN:
1622 /* Here we use the RTO of the destination. */
1623 if ((stcb == NULL) || (net == NULL)) {
1624 return (EFAULT);
1625 }
1626 if (net->RTO == 0) {
1627 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1628 } else {
1629 to_ticks = MSEC_TO_TICKS(net->RTO);
1630 }
1631 tmr = &net->rxt_timer;
1632 break;
1633 case SCTP_TIMER_TYPE_HEARTBEAT:
1634 /*
1635 		 * the net is used here so that we can add in the RTO, even
1636 		 * though we use a different timer. We also add the HB delay
1637 * PLUS a random jitter.
1638 */
1639 if (stcb == NULL) {
1640 return (EFAULT);
1641 } {
1642 uint32_t rndval;
1643 uint8_t this_random;
1644 int cnt_of_unconf = 0;
1645 struct sctp_nets *lnet;
1646
1647 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1648 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1649 (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1650 cnt_of_unconf++;
1651 }
1652 }
1653 if (cnt_of_unconf) {
1654 lnet = NULL;
1655 sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1656 }
1657 if (stcb->asoc.hb_random_idx > 3) {
1658 rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1659 memcpy(stcb->asoc.hb_random_values, &rndval,
1660 sizeof(stcb->asoc.hb_random_values));
1661 this_random = stcb->asoc.hb_random_values[0];
1662 stcb->asoc.hb_random_idx = 0;
1832 stcb->asoc.hb_random_idx = 0;
1663 stcb->asoc.hb_ect_randombit = 0;
1664 } else {
1665 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1666 stcb->asoc.hb_random_idx++;
1667 stcb->asoc.hb_ect_randombit = 0;
1668 }
1833 }
1834 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1835 stcb->asoc.hb_random_idx++;
1836 stcb->asoc.hb_ect_randombit = 0;
1669 /*
1670 			 * this_random will be 0 - 255 ms; RTO is in ms.
1671 */
1672 if ((stcb->asoc.hb_is_disabled) &&
1673 (cnt_of_unconf == 0)) {
1674 return (0);
1675 }
1676 if (net) {
1677 struct sctp_nets *lnet;
1678 int delay;
1679
1680 delay = stcb->asoc.heart_beat_delay;
1681 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1682 if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1683 ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
1684 (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1685 delay = 0;
1686 }
1687 }
1688 if (net->RTO == 0) {
1689 /* Never been checked */
1690 to_ticks = this_random + stcb->asoc.initial_rto + delay;
1691 } else {
1692 /* set rto_val to the ms */
1693 to_ticks = delay + net->RTO + this_random;
1694 }
1695 } else {
1696 if (cnt_of_unconf) {
1697 to_ticks = this_random + stcb->asoc.initial_rto;
1698 } else {
1699 to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
1700 }
1701 }
1702 /*
1703 * Now we must convert the to_ticks that are now in
1704 * ms to ticks.
1705 */
1706 to_ticks = MSEC_TO_TICKS(to_ticks);
1707 tmr = &stcb->asoc.hb_timer;
1708 }
1709 break;
1710 case SCTP_TIMER_TYPE_COOKIE:
1711 /*
1712 * Here we can use the RTO timer from the network since one
1713	 * RTT was complete. If a retransmission happened then we will be
1714 * using the RTO initial value.
1715 */
1716 if ((stcb == NULL) || (net == NULL)) {
1717 return (EFAULT);
1718 }
1719 if (net->RTO == 0) {
1720 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1721 } else {
1722 to_ticks = MSEC_TO_TICKS(net->RTO);
1723 }
1724 tmr = &net->rxt_timer;
1725 break;
1726 case SCTP_TIMER_TYPE_NEWCOOKIE:
1727 /*
1728	 * nothing needed but the endpoint here; usually about 60
1729 * minutes.
1730 */
1731 tmr = &inp->sctp_ep.signature_change;
1732 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1733 break;
1734 case SCTP_TIMER_TYPE_ASOCKILL:
1735 if (stcb == NULL) {
1736 return (EFAULT);
1737 }
1738 tmr = &stcb->asoc.strreset_timer;
1739 to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1740 break;
1741 case SCTP_TIMER_TYPE_INPKILL:
1742 /*
1743	 * The inp is set up to die. We re-use the signature_change
1744 * timer since that has stopped and we are in the GONE
1745 * state.
1746 */
1747 tmr = &inp->sctp_ep.signature_change;
1748 to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
1749 break;
1750 case SCTP_TIMER_TYPE_PATHMTURAISE:
1751 /*
1752	 * Here we use the value found in the EP for PMTU, usually
1753 * about 10 minutes.
1754 */
1755 if (stcb == NULL) {
1756 return (EFAULT);
1757 }
1758 if (net == NULL) {
1759 return (EFAULT);
1760 }
1761 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
1762 tmr = &net->pmtu_timer;
1763 break;
1764 case SCTP_TIMER_TYPE_SHUTDOWNACK:
1765 /* Here we use the RTO of the destination */
1766 if ((stcb == NULL) || (net == NULL)) {
1767 return (EFAULT);
1768 }
1769 if (net->RTO == 0) {
1770 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1771 } else {
1772 to_ticks = MSEC_TO_TICKS(net->RTO);
1773 }
1774 tmr = &net->rxt_timer;
1775 break;
1776 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1777 /*
1778	 * Here we use the endpoint's shutdown guard timer, usually
1779 * about 3 minutes.
1780 */
1781 if (stcb == NULL) {
1782 return (EFAULT);
1783 }
1784 to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
1785 tmr = &stcb->asoc.shut_guard_timer;
1786 break;
1787 case SCTP_TIMER_TYPE_STRRESET:
1788 /*
1789 * Here the timer comes from the inp but its value is from
1790 * the RTO.
1791 */
1792 if ((stcb == NULL) || (net == NULL)) {
1793 return (EFAULT);
1794 }
1795 if (net->RTO == 0) {
1796 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1797 } else {
1798 to_ticks = MSEC_TO_TICKS(net->RTO);
1799 }
1800 tmr = &stcb->asoc.strreset_timer;
1801 break;
1802
1803 case SCTP_TIMER_TYPE_EARLYFR:
1804 {
1805 unsigned int msec;
1806
1807 if ((stcb == NULL) || (net == NULL)) {
1808 return (EFAULT);
1809 }
1810 if (net->flight_size > net->cwnd) {
1811 /* no need to start */
1812 return (0);
1813 }
1814 SCTP_STAT_INCR(sctps_earlyfrstart);
1815 if (net->lastsa == 0) {
1816 /* Hmm no rtt estimate yet? */
1817 msec = stcb->asoc.initial_rto >> 2;
1818 } else {
1819 msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
1820 }
1821 if (msec < sctp_early_fr_msec) {
1822 msec = sctp_early_fr_msec;
1823 if (msec < SCTP_MINFR_MSEC_FLOOR) {
1824 msec = SCTP_MINFR_MSEC_FLOOR;
1825 }
1826 }
1827 to_ticks = MSEC_TO_TICKS(msec);
1828 tmr = &net->fr_timer;
1829 }
1830 break;
1831 case SCTP_TIMER_TYPE_ASCONF:
1832 /*
1833 * Here the timer comes from the inp but its value is from
1834 * the RTO.
1835 */
1836 if ((stcb == NULL) || (net == NULL)) {
1837 return (EFAULT);
1838 }
1839 if (net->RTO == 0) {
1840 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1841 } else {
1842 to_ticks = MSEC_TO_TICKS(net->RTO);
1843 }
1844 tmr = &stcb->asoc.asconf_timer;
1845 break;
1846 case SCTP_TIMER_TYPE_AUTOCLOSE:
1847 if (stcb == NULL) {
1848 return (EFAULT);
1849 }
1850 if (stcb->asoc.sctp_autoclose_ticks == 0) {
1851 /*
1852 * Really an error since stcb is NOT set to
1853 * autoclose
1854 */
1855 return (0);
1856 }
1857 to_ticks = stcb->asoc.sctp_autoclose_ticks;
1858 tmr = &stcb->asoc.autoclose_timer;
1859 break;
1860 default:
1861#ifdef SCTP_DEBUG
1862 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1863 printf("sctp_timer_start:Unknown timer type %d\n",
1864 t_type);
1865 }
1866#endif /* SCTP_DEBUG */
1867 return (EFAULT);
1868 break;
1869 };
1870 if ((to_ticks <= 0) || (tmr == NULL)) {
1871#ifdef SCTP_DEBUG
1872 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
1873 printf("sctp_timer_start:%d:software error to_ticks:%d tmr:%p not set ??\n",
1874 t_type, to_ticks, tmr);
1875 }
1876#endif /* SCTP_DEBUG */
1877 return (EFAULT);
1878 }
1879 if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1880 /*
1881	 * We do NOT allow the timer to already be running; if it is,
1882	 * we leave the current one up unchanged.
1883 */
1884 return (EALREADY);
1885 }
1886 /* At this point we can proceed */
1887 if (t_type == SCTP_TIMER_TYPE_SEND) {
1888 stcb->asoc.num_send_timers_up++;
1889 }
1890 tmr->stopped_from = 0;
1891 tmr->type = t_type;
1892 tmr->ep = (void *)inp;
1893 tmr->tcb = (void *)stcb;
1894 tmr->net = (void *)net;
1895 tmr->self = (void *)tmr;
1896 tmr->ticks = ticks;
1897 SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
1898 return (0);
1899}
1900
1901int
1902sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1903 struct sctp_nets *net, uint32_t from)
1904{
1905 struct sctp_timer *tmr;
1906
1907 if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
1908 (inp == NULL))
1909 return (EFAULT);
1910
1911 tmr = NULL;
1912 if (stcb) {
1913 SCTP_TCB_LOCK_ASSERT(stcb);
1914 }
1915 switch (t_type) {
1916 case SCTP_TIMER_TYPE_ADDR_WQ:
1917 tmr = &sctppcbinfo.addr_wq_timer;
1918 break;
1919 case SCTP_TIMER_TYPE_EARLYFR:
1920 if ((stcb == NULL) || (net == NULL)) {
1921 return (EFAULT);
1922 }
1923 tmr = &net->fr_timer;
1924 SCTP_STAT_INCR(sctps_earlyfrstop);
1925 break;
1926 case SCTP_TIMER_TYPE_ITERATOR:
1927 {
1928 struct sctp_iterator *it;
1929
1930 it = (struct sctp_iterator *)inp;
1931 tmr = &it->tmr;
1932 }
1933 break;
1934 case SCTP_TIMER_TYPE_SEND:
1935 if ((stcb == NULL) || (net == NULL)) {
1936 return (EFAULT);
1937 }
1938 tmr = &net->rxt_timer;
1939 break;
1940 case SCTP_TIMER_TYPE_INIT:
1941 if ((stcb == NULL) || (net == NULL)) {
1942 return (EFAULT);
1943 }
1944 tmr = &net->rxt_timer;
1945 break;
1946 case SCTP_TIMER_TYPE_RECV:
1947 if (stcb == NULL) {
1948 return (EFAULT);
1949 }
1950 tmr = &stcb->asoc.dack_timer;
1951 break;
1952 case SCTP_TIMER_TYPE_SHUTDOWN:
1953 if ((stcb == NULL) || (net == NULL)) {
1954 return (EFAULT);
1955 }
1956 tmr = &net->rxt_timer;
1957 break;
1958 case SCTP_TIMER_TYPE_HEARTBEAT:
1959 if (stcb == NULL) {
1960 return (EFAULT);
1961 }
1962 tmr = &stcb->asoc.hb_timer;
1963 break;
1964 case SCTP_TIMER_TYPE_COOKIE:
1965 if ((stcb == NULL) || (net == NULL)) {
1966 return (EFAULT);
1967 }
1968 tmr = &net->rxt_timer;
1969 break;
1970 case SCTP_TIMER_TYPE_NEWCOOKIE:
1971 /* nothing needed but the endpoint here */
1972 tmr = &inp->sctp_ep.signature_change;
1973 /*
1974 * We re-use the newcookie timer for the INP kill timer. We
1975	 * must ensure that we do not kill it by accident.
1976 */
1977 break;
1978 case SCTP_TIMER_TYPE_ASOCKILL:
1979 /*
1980 * Stop the asoc kill timer.
1981 */
1982 if (stcb == NULL) {
1983 return (EFAULT);
1984 }
1985 tmr = &stcb->asoc.strreset_timer;
1986 break;
1987
1988 case SCTP_TIMER_TYPE_INPKILL:
1989 /*
1990	 * The inp is set up to die. We re-use the signature_change
1991 * timer since that has stopped and we are in the GONE
1992 * state.
1993 */
1994 tmr = &inp->sctp_ep.signature_change;
1995 break;
1996 case SCTP_TIMER_TYPE_PATHMTURAISE:
1997 if ((stcb == NULL) || (net == NULL)) {
1998 return (EFAULT);
1999 }
2000 tmr = &net->pmtu_timer;
2001 break;
2002 case SCTP_TIMER_TYPE_SHUTDOWNACK:
2003 if ((stcb == NULL) || (net == NULL)) {
2004 return (EFAULT);
2005 }
2006 tmr = &net->rxt_timer;
2007 break;
2008 case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2009 if (stcb == NULL) {
2010 return (EFAULT);
2011 }
2012 tmr = &stcb->asoc.shut_guard_timer;
2013 break;
2014 case SCTP_TIMER_TYPE_STRRESET:
2015 if (stcb == NULL) {
2016 return (EFAULT);
2017 }
2018 tmr = &stcb->asoc.strreset_timer;
2019 break;
2020 case SCTP_TIMER_TYPE_ASCONF:
2021 if (stcb == NULL) {
2022 return (EFAULT);
2023 }
2024 tmr = &stcb->asoc.asconf_timer;
2025 break;
2026 case SCTP_TIMER_TYPE_AUTOCLOSE:
2027 if (stcb == NULL) {
2028 return (EFAULT);
2029 }
2030 tmr = &stcb->asoc.autoclose_timer;
2031 break;
2032 default:
2033#ifdef SCTP_DEBUG
2034 if (sctp_debug_on & SCTP_DEBUG_TIMER1) {
2035 printf("sctp_timer_stop:Unknown timer type %d\n",
2036 t_type);
2037 }
2038#endif /* SCTP_DEBUG */
2039 break;
2040 };
2041 if (tmr == NULL) {
2042 return (EFAULT);
2043 }
2044 if ((tmr->type != t_type) && tmr->type) {
2045 /*
2046	 * OK, we have a timer that is under joint use; the cookie timer,
2047	 * perhaps, shares it with the SEND timer. We therefore are NOT
2048 * running the timer that the caller wants stopped. So just
2049 * return.
2050 */
2051 return (0);
2052 }
2053 if (t_type == SCTP_TIMER_TYPE_SEND) {
2054 stcb->asoc.num_send_timers_up--;
2055 if (stcb->asoc.num_send_timers_up < 0) {
2056 stcb->asoc.num_send_timers_up = 0;
2057 }
2058 }
2059 tmr->self = NULL;
2060 tmr->stopped_from = from;
2061 SCTP_OS_TIMER_STOP(&tmr->timer);
2062 return (0);
2063}
2064
2065#ifdef SCTP_USE_ADLER32
2066static uint32_t
2067update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2068{
2069 uint32_t s1 = adler & 0xffff;
2070 uint32_t s2 = (adler >> 16) & 0xffff;
2071 int n;
2072
2073 for (n = 0; n < len; n++, buf++) {
2074 /* s1 = (s1 + buf[n]) % BASE */
2075 /* first we add */
2076 s1 = (s1 + *buf);
2077 /*
2078 * now if we need to, we do a mod by subtracting. It seems a
2079 * bit faster since I really will only ever do one subtract
2080 * at the MOST, since buf[n] is a max of 255.
2081 */
2082 if (s1 >= SCTP_ADLER32_BASE) {
2083 s1 -= SCTP_ADLER32_BASE;
2084 }
2085 /* s2 = (s2 + s1) % BASE */
2086 /* first we add */
2087 s2 = (s2 + s1);
2088 /*
2089	 * again, it is more efficient (it seems) to subtract since
2090	 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2091	 * worst case. This would then be (2 * BASE) - 2, which will
2092 * still only do one subtract. On Intel this is much better
2093 * to do this way and avoid the divide. Have not -pg'd on
2094 * sparc.
2095 */
2096 if (s2 >= SCTP_ADLER32_BASE) {
2097 s2 -= SCTP_ADLER32_BASE;
2098 }
2099 }
2100 /* Return the adler32 of the bytes buf[0..len-1] */
2101 return ((s2 << 16) + s1);
2102}
2103
2104#endif
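
The loop above exceeds the modulus by at most one base per step, so a compare-and-subtract can stand in for the division. The stand-alone sketch below (illustration only, not part of sctputil.c) exercises the same deferred-modulo update in user space; it assumes SCTP_ADLER32_BASE is the standard Adler-32 modulus 65521, and the function and macro names here are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define ADLER32_BASE 65521U		/* assumed value of SCTP_ADLER32_BASE */

static uint32_t
adler32_update(uint32_t adler, const uint8_t *buf, int32_t len)
{
	uint32_t s1 = adler & 0xffff;
	uint32_t s2 = (adler >> 16) & 0xffff;
	int32_t n;

	for (n = 0; n < len; n++) {
		s1 += buf[n];
		if (s1 >= ADLER32_BASE)		/* one subtract replaces the mod */
			s1 -= ADLER32_BASE;
		s2 += s1;
		if (s2 >= ADLER32_BASE)
			s2 -= ADLER32_BASE;
	}
	return ((s2 << 16) + s1);
}

int
main(void)
{
	const uint8_t msg[] = "Wikipedia";

	/* seeded with 1, as the kernel code does via base = 1L;
	 * the classic Adler-32 of "Wikipedia" is 0x11e60398 */
	printf("0x%08x\n", (unsigned)adler32_update(1, msg, (int32_t)(sizeof(msg) - 1)));
	return (0);
}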
2105
2106
2107uint32_t
2108sctp_calculate_len(struct mbuf *m)
2109{
2110 uint32_t tlen = 0;
2111 struct mbuf *at;
2112
2113 at = m;
2114 while (at) {
2115 tlen += SCTP_BUF_LEN(at);
2116 at = SCTP_BUF_NEXT(at);
2117 }
2118 return (tlen);
2119}
2120
2121#if defined(SCTP_WITH_NO_CSUM)
2122
2123uint32_t
2124sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2125{
2126 /*
2127	 * Given an mbuf chain with a packet header offset by 'offset'
2128	 * pointing at an sctphdr (with csum set to 0), go through the chain
2129	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
2130	 * currently Adler32 but will change to CRC32c soon. As a side
2131	 * bonus it also calculates the total length of the mbuf chain. Note: if
2132	 * offset is greater than the total mbuf length, checksum=1,
2133	 * pktlen=0 is returned (i.e. no real error code).
2134 */
2135 if (pktlen == NULL)
2136 return (0);
2137 *pktlen = sctp_calculate_len(m);
2138 return (0);
2139}
2140
2141#elif defined(SCTP_USE_INCHKSUM)
2142
2143#include <machine/in_cksum.h>
2144
2145uint32_t
2146sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2147{
2148 /*
2149	 * Given an mbuf chain with a packet header offset by 'offset'
2150	 * pointing at an sctphdr (with csum set to 0), go through the chain
2151	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
2152	 * currently Adler32 but will change to CRC32c soon. As a side
2153	 * bonus it also calculates the total length of the mbuf chain. Note: if
2154	 * offset is greater than the total mbuf length, checksum=1,
2155	 * pktlen=0 is returned (i.e. no real error code).
2156 */
2157 int32_t tlen = 0;
2158 struct mbuf *at;
2159 uint32_t the_sum, retsum;
2160
2161 at = m;
2162 while (at) {
2163 tlen += SCTP_BUF_LEN(at);
2164 at = SCTP_BUF_NEXT(at);
2165 }
2166 the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2167 if (pktlen != NULL)
2168 *pktlen = (tlen - offset);
2169 retsum = htons(the_sum);
2170 return (the_sum);
2171}
2172
2173#else
2174
2175uint32_t
2176sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2177{
2178 /*
2179	 * Given an mbuf chain with a packet header offset by 'offset'
2180	 * pointing at an sctphdr (with csum set to 0), go through the chain
2181	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This is
2182	 * currently Adler32 but will change to CRC32c soon. As a side
2183	 * bonus it also calculates the total length of the mbuf chain. Note: if
2184	 * offset is greater than the total mbuf length, checksum=1,
2185	 * pktlen=0 is returned (i.e. no real error code).
2186 */
2187 int32_t tlen = 0;
2188
2189#ifdef SCTP_USE_ADLER32
2190 uint32_t base = 1L;
2191
2192#else
2193 uint32_t base = 0xffffffff;
2194
2195#endif /* SCTP_USE_ADLER32 */
2196 struct mbuf *at;
2197
2198 at = m;
2199 /* find the correct mbuf and offset into mbuf */
2200 while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
2201 offset -= SCTP_BUF_LEN(at); /* update remaining offset
2202 * left */
2203 at = SCTP_BUF_NEXT(at);
2204 }
2205 while (at != NULL) {
2206 if ((SCTP_BUF_LEN(at) - offset) > 0) {
2207#ifdef SCTP_USE_ADLER32
2208 base = update_adler32(base,
2209 (unsigned char *)(SCTP_BUF_AT(at, offset)),
2210 (unsigned int)(SCTP_BUF_LEN(at) - offset));
2211#else
2212 if ((SCTP_BUF_LEN(at) - offset) < 4) {
2213 /* Use old method if less than 4 bytes */
2214 base = old_update_crc32(base,
2215 (unsigned char *)(SCTP_BUF_AT(at, offset)),
2216 (unsigned int)(SCTP_BUF_LEN(at) - offset));
2217 } else {
2218 base = update_crc32(base,
2219 (unsigned char *)(SCTP_BUF_AT(at, offset)),
2220 (unsigned int)(SCTP_BUF_LEN(at) - offset));
2221 }
2222#endif /* SCTP_USE_ADLER32 */
2223 tlen += SCTP_BUF_LEN(at) - offset;
2224 /* we only offset once into the first mbuf */
2225 }
2226 if (offset) {
2227 if (offset < SCTP_BUF_LEN(at))
2228 offset = 0;
2229 else
2230 offset -= SCTP_BUF_LEN(at);
2231 }
2232 at = SCTP_BUF_NEXT(at);
2233 }
2234 if (pktlen != NULL) {
2235 *pktlen = tlen;
2236 }
2237#ifdef SCTP_USE_ADLER32
2238 /* Adler32 */
2239 base = htonl(base);
2240#else
2241 /* CRC-32c */
2242 base = sctp_csum_finalize(base);
2243#endif
2244 return (base);
2245}
2246
2247
2248#endif
2249
2250void
2251sctp_mtu_size_reset(struct sctp_inpcb *inp,
2252 struct sctp_association *asoc, uint32_t mtu)
2253{
2254 /*
2255	 * Reset the P-MTU size on this association. This involves changing
2256	 * the asoc MTU and going through ANY chunk+overhead larger than mtu to
2257 * allow the DF flag to be cleared.
2258 */
2259 struct sctp_tmit_chunk *chk;
2260 unsigned int eff_mtu, ovh;
2261
2262 asoc->smallest_mtu = mtu;
2263 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2264 ovh = SCTP_MIN_OVERHEAD;
2265 } else {
2266 ovh = SCTP_MIN_V4_OVERHEAD;
2267 }
2268 eff_mtu = mtu - ovh;
2269 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2270
2271 if (chk->send_size > eff_mtu) {
2272 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2273 }
2274 }
2275 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2276 if (chk->send_size > eff_mtu) {
2277 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2278 }
2279 }
2280}
2281
2282
2283/*
2284 * Given an association and the starting time of the current RTT period,
2285 * return the RTO in msecs. 'net' should point to the current network.
2286 */
2287uint32_t
2288sctp_calculate_rto(struct sctp_tcb *stcb,
2289 struct sctp_association *asoc,
2290 struct sctp_nets *net,
2291 struct timeval *old)
2292{
2293 /*
2294	 * Given an association and the starting time of the current RTT
2295	 * period (in 'old'), return the RTO in number of msecs.
2296 */
2297 int calc_time = 0;
2298 int o_calctime;
2299 unsigned int new_rto = 0;
2300 int first_measure = 0;
2301 struct timeval now;
2302
2303 /************************/
2304 /* 1. calculate new RTT */
2305 /************************/
2306 /* get the current time */
2307 SCTP_GETTIME_TIMEVAL(&now);
2308 /* compute the RTT value */
2309 if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2310 calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2311 if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2312 calc_time += (((u_long)now.tv_usec -
2313 (u_long)old->tv_usec) / 1000);
2314 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2315 /* Borrow 1,000ms from current calculation */
2316 calc_time -= 1000;
2317 /* Add in the slop over */
2318 calc_time += ((int)now.tv_usec / 1000);
2319 /* Add in the pre-second ms's */
2320 calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2321 }
2322 } else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2323 if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2324 calc_time = ((u_long)now.tv_usec -
2325 (u_long)old->tv_usec) / 1000;
2326 } else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2327 /* impossible .. garbage in nothing out */
2328 return (((net->lastsa >> 2) + net->lastsv) >> 1);
2329 } else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2330 /*
2331 * We have to have 1 usec :-D this must be the
2332 * loopback.
2333 */
2334 calc_time = 1;
2335 } else {
2336 /* impossible .. garbage in nothing out */
2337 return (((net->lastsa >> 2) + net->lastsv) >> 1);
2338 }
2339 } else {
2340 /* Clock wrapped? */
2341 return (((net->lastsa >> 2) + net->lastsv) >> 1);
2342 }
2343 /***************************/
2344 /* 2. update RTTVAR & SRTT */
2345 /***************************/
2346 o_calctime = calc_time;
2347 /* this is Van Jacobson's integer version */
2348 if (net->RTO) {
2349 calc_time -= (net->lastsa >> 3);
2350 if ((int)net->prev_rtt > o_calctime) {
2351 net->rtt_variance = net->prev_rtt - o_calctime;
2352 /* decreasing */
2353 net->rto_variance_dir = 0;
2354 } else {
2355 /* increasing */
2356 net->rtt_variance = o_calctime - net->prev_rtt;
2357 net->rto_variance_dir = 1;
2358 }
2359#ifdef SCTP_RTTVAR_LOGGING
2360 rto_logging(net, SCTP_LOG_RTTVAR);
2361#endif
2362 net->prev_rtt = o_calctime;
2363 net->lastsa += calc_time;
2364 if (calc_time < 0) {
2365 calc_time = -calc_time;
2366 }
2367 calc_time -= (net->lastsv >> 2);
2368 net->lastsv += calc_time;
2369 if (net->lastsv == 0) {
2370 net->lastsv = SCTP_CLOCK_GRANULARITY;
2371 }
2372 } else {
2373		/* First RTO measurement */
2374 net->lastsa = calc_time;
2375 net->lastsv = calc_time >> 1;
2376 first_measure = 1;
2377 net->rto_variance_dir = 1;
2378 net->prev_rtt = o_calctime;
2379 net->rtt_variance = 0;
2380#ifdef SCTP_RTTVAR_LOGGING
2381 rto_logging(net, SCTP_LOG_INITIAL_RTT);
2382#endif
2383 }
2384 new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
2385 if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2386 (stcb->asoc.sat_network_lockout == 0)) {
2387 stcb->asoc.sat_network = 1;
2388 } else if ((!first_measure) && stcb->asoc.sat_network) {
2389 stcb->asoc.sat_network = 0;
2390 stcb->asoc.sat_network_lockout = 1;
2391 }
2392 /* bound it, per C6/C7 in Section 5.3.1 */
2393 if (new_rto < stcb->asoc.minrto) {
2394 new_rto = stcb->asoc.minrto;
2395 }
2396 if (new_rto > stcb->asoc.maxrto) {
2397 new_rto = stcb->asoc.maxrto;
2398 }
2399 /* we are now returning the RTT Smoothed */
2400 return ((uint32_t) new_rto);
2401}
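
As a reading aid (not part of the file), here is a minimal user-space rendition of the integer smoothing that sctp_calculate_rto() applies once a first measurement exists: lastsa carries the smoothed RTT scaled by 8 and lastsv the variance scaled by 4, so the final ((lastsa >> 2) + lastsv) >> 1 works out, given that scaling, to SRTT plus twice the RTT variance, with no floating point. The state struct and function names below are invented for the sketch, and the "first measurement" test is a stand-in for the net->RTO == 0 check.

#include <stdio.h>

struct rtt_state {
	int lastsa;	/* smoothed RTT, scaled by 8 */
	int lastsv;	/* RTT variance, scaled by 4 */
};

/* mirrors the update arithmetic of sctp_calculate_rto() above, without the
 * timestamp handling, satellite heuristics or min/max clamping */
static unsigned int
rto_update(struct rtt_state *st, int measured_ms)
{
	int err;

	if (st->lastsa == 0 && st->lastsv == 0) {
		/* first measurement, as in the "First RTO measurement" branch */
		st->lastsa = measured_ms;
		st->lastsv = measured_ms >> 1;
	} else {
		err = measured_ms - (st->lastsa >> 3);
		st->lastsa += err;		/* SRTT += (measured - SRTT) / 8 */
		if (err < 0)
			err = -err;
		err -= st->lastsv >> 2;
		st->lastsv += err;		/* RTTVAR += (|err| - RTTVAR) / 4 */
		if (st->lastsv == 0)
			st->lastsv = 1;		/* stand-in for SCTP_CLOCK_GRANULARITY */
	}
	return ((unsigned int)(((st->lastsa >> 2) + st->lastsv) >> 1));
}

int
main(void)
{
	struct rtt_state st = { 0, 0 };
	int samples[] = { 100, 120, 80, 110 };	/* made-up RTT samples in ms */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("sample %d ms -> RTO %u ms\n", samples[i],
		    rto_update(&st, samples[i]));
	return (0);
}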
2402
2403/*
2404 * return a pointer to a contiguous piece of data from the given mbuf chain
2405 * starting at 'off' for 'len' bytes. If the desired piece spans more than
2406 * one mbuf, a copy is made at 'ptr'. The caller must ensure that the buffer
2407 * size is >= 'len'. Returns NULL if there aren't 'len' bytes in the chain.
2408 */
2409__inline caddr_t
2410sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2411{
2412 uint32_t count;
2413 uint8_t *ptr;
2414
2415 ptr = in_ptr;
2416 if ((off < 0) || (len <= 0))
2417 return (NULL);
2418
2419 /* find the desired start location */
2420 while ((m != NULL) && (off > 0)) {
2421 if (off < SCTP_BUF_LEN(m))
2422 break;
2423 off -= SCTP_BUF_LEN(m);
2424 m = SCTP_BUF_NEXT(m);
2425 }
2426 if (m == NULL)
2427 return (NULL);
2428
2429 /* is the current mbuf large enough (eg. contiguous)? */
2430 if ((SCTP_BUF_LEN(m) - off) >= len) {
2431 return (mtod(m, caddr_t)+off);
2432 } else {
2433 /* else, it spans more than one mbuf, so save a temp copy... */
2434 while ((m != NULL) && (len > 0)) {
2435 count = min(SCTP_BUF_LEN(m) - off, len);
2436 bcopy(mtod(m, caddr_t)+off, ptr, count);
2437 len -= count;
2438 ptr += count;
2439 off = 0;
2440 m = SCTP_BUF_NEXT(m);
2441 }
2442 if ((m == NULL) && (len > 0))
2443 return (NULL);
2444 else
2445 return ((caddr_t)in_ptr);
2446 }
2447}
2448
2449
2450
2451struct sctp_paramhdr *
2452sctp_get_next_param(struct mbuf *m,
2453 int offset,
2454 struct sctp_paramhdr *pull,
2455 int pull_limit)
2456{
2457 /* This just provides a typed signature to Peter's Pull routine */
2458 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2459 (uint8_t *) pull));
2460}
2461
2462
2463int
2464sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2465{
2466 /*
2467	 * add padlen bytes of zero-filled padding to the end of the mbuf. If
2468 * padlen is > 3 this routine will fail.
2469 */
2470 uint8_t *dp;
2471 int i;
2472
2473 if (padlen > 3) {
2474 return (ENOBUFS);
2475 }
2476 if (M_TRAILINGSPACE(m)) {
2477 /*
2478 * The easy way. We hope the majority of the time we hit
2479 * here :)
2480 */
2481 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2482 SCTP_BUF_LEN(m) += padlen;
2483 } else {
2484 /* Hard way we must grow the mbuf */
2485 struct mbuf *tmp;
2486
2487 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2488 if (tmp == NULL) {
2489 /* Out of space GAK! we are in big trouble. */
2490 return (ENOSPC);
2491 }
2492 /* setup and insert in middle */
2493 SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m);
2494 SCTP_BUF_LEN(tmp) = padlen;
2495 SCTP_BUF_NEXT(m) = tmp;
2496 dp = mtod(tmp, uint8_t *);
2497 }
2498 /* zero out the pad */
2499 for (i = 0; i < padlen; i++) {
2500 *dp = 0;
2501 dp++;
2502 }
2503 return (0);
2504}
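
SCTP chunks are padded out to a 4-byte boundary, which is why sctp_add_pad_tombuf() only ever has to supply up to 3 bytes. The fragment below is an illustration only; pad_needed() is a hypothetical helper, not something in this file, showing how a caller might derive that pad length from a chunk length.

#include <stdio.h>

/* bytes needed to round chunk_len up to the next multiple of 4 (0..3) */
static int
pad_needed(int chunk_len)
{
	return ((4 - (chunk_len & 0x03)) & 0x03);
}

int
main(void)
{
	int len;

	for (len = 13; len <= 16; len++)
		printf("chunk length %d -> %d pad byte(s)\n", len, pad_needed(len));
	return (0);
}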
2505
2506int
2507sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2508{
2509 /* find the last mbuf in chain and pad it */
2510 struct mbuf *m_at;
2511
2512 m_at = m;
2513 if (last_mbuf) {
2514 return (sctp_add_pad_tombuf(last_mbuf, padval));
2515 } else {
2516 while (m_at) {
2517 if (SCTP_BUF_NEXT(m_at) == NULL) {
2518 return (sctp_add_pad_tombuf(m_at, padval));
2519 }
2520 m_at = SCTP_BUF_NEXT(m_at);
2521 }
2522 }
2523 return (EFAULT);
2524}
2525
2526int sctp_asoc_change_wake = 0;
2527
2528static void
2529sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
2530 uint32_t error, void *data)
2531{
2532 struct mbuf *m_notify;
2533 struct sctp_assoc_change *sac;
2534 struct sctp_queued_to_read *control;
2535
2536 /*
2537	 * First, if we are going down, dump everything we can to the
2538 * socket rcv queue.
2539 */
2540
2541 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
2542 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
2543 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
2544 ) {
2545 /* If the socket is gone we are out of here */
2546 return;
2547 }
2548 /*
2549 * For TCP model AND UDP connected sockets we will send an error up
2550 * when an ABORT comes in.
2551 */
2552 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2553 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2554 (event == SCTP_COMM_LOST)) {
2555 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT)
2556 stcb->sctp_socket->so_error = ECONNREFUSED;
2557 else
2558 stcb->sctp_socket->so_error = ECONNRESET;
2559 /* Wake ANY sleepers */
2560 sorwakeup(stcb->sctp_socket);
2561 sowwakeup(stcb->sctp_socket);
2562 sctp_asoc_change_wake++;
2563 }
2564 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2565 /* event not enabled */
2566 return;
2567 }
2568 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
2569 if (m_notify == NULL)
2570 /* no space left */
2571 return;
2572 SCTP_BUF_LEN(m_notify) = 0;
2573
2574 sac = mtod(m_notify, struct sctp_assoc_change *);
2575 sac->sac_type = SCTP_ASSOC_CHANGE;
2576 sac->sac_flags = 0;
2577 sac->sac_length = sizeof(struct sctp_assoc_change);
2578 sac->sac_state = event;
2579 sac->sac_error = error;
2580 /* XXX verify these stream counts */
2581 sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2582 sac->sac_inbound_streams = stcb->asoc.streamincnt;
2583 sac->sac_assoc_id = sctp_get_associd(stcb);
2584 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
2585 SCTP_BUF_NEXT(m_notify) = NULL;
2586 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2587 0, 0, 0, 0, 0, 0,
2588 m_notify);
2589 if (control == NULL) {
2590 /* no memory */
2591 sctp_m_freem(m_notify);
2592 return;
2593 }
2594 control->length = SCTP_BUF_LEN(m_notify);
2595 /* not that we need this */
2596 control->tail_mbuf = m_notify;
2597 control->spec_flags = M_NOTIFICATION;
2598 sctp_add_to_readq(stcb->sctp_ep, stcb,
2599 control,
2600 &stcb->sctp_socket->so_rcv, 1);
2601 if (event == SCTP_COMM_LOST) {
2602 /* Wake up any sleeper */
2603 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2604 }
2605}
2606
2607static void
2608sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2609 struct sockaddr *sa, uint32_t error)
2610{
2611 struct mbuf *m_notify;
2612 struct sctp_paddr_change *spc;
2613 struct sctp_queued_to_read *control;
2614
2615 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT))
2616 /* event not enabled */
2617 return;
2618
2619 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2620 if (m_notify == NULL)
2621 return;
2622 SCTP_BUF_LEN(m_notify) = 0;
2623 spc = mtod(m_notify, struct sctp_paddr_change *);
2624 spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2625 spc->spc_flags = 0;
2626 spc->spc_length = sizeof(struct sctp_paddr_change);
2627 if (sa->sa_family == AF_INET) {
2628 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2629 } else {
2630 struct sockaddr_in6 *sin6;
2631
2632 memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2633
2634		/* recover scope_id for user */
2635		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2636		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2637			(void)sa6_recoverscope(sin6);
2802		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2803		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2804			if (sin6->sin6_scope_id == 0) {
2805				/* recover scope_id for user */
2806				(void)sa6_recoverscope(sin6);
2807			} else {
2808				/* clear embedded scope_id for user */
2809				in6_clearscope(&sin6->sin6_addr);
2810			}
2638 }
2639 }
2640 spc->spc_state = state;
2641 spc->spc_error = error;
2642 spc->spc_assoc_id = sctp_get_associd(stcb);
2643
2644 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2645 SCTP_BUF_NEXT(m_notify) = NULL;
2646
2647 /* append to socket */
2648 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2649 0, 0, 0, 0, 0, 0,
2650 m_notify);
2651 if (control == NULL) {
2652 /* no memory */
2653 sctp_m_freem(m_notify);
2654 return;
2655 }
2656 control->length = SCTP_BUF_LEN(m_notify);
2657 control->spec_flags = M_NOTIFICATION;
2658 /* not that we need this */
2659 control->tail_mbuf = m_notify;
2660 sctp_add_to_readq(stcb->sctp_ep, stcb,
2661 control,
2662 &stcb->sctp_socket->so_rcv, 1);
2663}
2664
2665
2666static void
2667sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
2668 struct sctp_tmit_chunk *chk)
2669{
2670 struct mbuf *m_notify;
2671 struct sctp_send_failed *ssf;
2672 struct sctp_queued_to_read *control;
2673 int length;
2674
2675 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2676 /* event not enabled */
2677 return;
2678
2679 length = sizeof(struct sctp_send_failed) + chk->send_size;
2680 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2681 if (m_notify == NULL)
2682 /* no space left */
2683 return;
2684 SCTP_BUF_LEN(m_notify) = 0;
2685 ssf = mtod(m_notify, struct sctp_send_failed *);
2686 ssf->ssf_type = SCTP_SEND_FAILED;
2687 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2688 ssf->ssf_flags = SCTP_DATA_UNSENT;
2689 else
2690 ssf->ssf_flags = SCTP_DATA_SENT;
2691 ssf->ssf_length = length;
2692 ssf->ssf_error = error;
2693 /* not exactly what the user sent in, but should be close :) */
2694 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2695 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2696 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2697 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2698 ssf->ssf_info.sinfo_context = chk->rec.data.context;
2699 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2700 ssf->ssf_assoc_id = sctp_get_associd(stcb);
2701 SCTP_BUF_NEXT(m_notify) = chk->data;
2702 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2703
2704 /* Steal off the mbuf */
2705 chk->data = NULL;
2706 /*
2707	 * For this case, we check the actual socket buffer: since the assoc
2708	 * is going away, we don't want to overfill the socket buffer for a
2709	 * non-reader.
2710 */
2711 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2712 sctp_m_freem(m_notify);
2713 return;
2714 }
2715 /* append to socket */
2716 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2717 0, 0, 0, 0, 0, 0,
2718 m_notify);
2719 if (control == NULL) {
2720 /* no memory */
2721 sctp_m_freem(m_notify);
2722 return;
2723 }
2724 control->spec_flags = M_NOTIFICATION;
2725 sctp_add_to_readq(stcb->sctp_ep, stcb,
2726 control,
2727 &stcb->sctp_socket->so_rcv, 1);
2728}
2729
2730
2731static void
2732sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
2733 struct sctp_stream_queue_pending *sp)
2734{
2735 struct mbuf *m_notify;
2736 struct sctp_send_failed *ssf;
2737 struct sctp_queued_to_read *control;
2738 int length;
2739
2740 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2741 /* event not enabled */
2742 return;
2743
2744 length = sizeof(struct sctp_send_failed) + sp->length;
2745	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2746 if (m_notify == NULL)
2747 /* no space left */
2748 return;
2749 SCTP_BUF_LEN(m_notify) = 0;
2750 ssf = mtod(m_notify, struct sctp_send_failed *);
2751 ssf->ssf_type = SCTP_SEND_FAILED;
2752 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2753 ssf->ssf_flags = SCTP_DATA_UNSENT;
2754 else
2755 ssf->ssf_flags = SCTP_DATA_SENT;
2756 ssf->ssf_length = length;
2757 ssf->ssf_error = error;
2758 /* not exactly what the user sent in, but should be close :) */
2759 ssf->ssf_info.sinfo_stream = sp->stream;
2760 ssf->ssf_info.sinfo_ssn = sp->strseq;
2761 ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
2762 ssf->ssf_info.sinfo_ppid = sp->ppid;
2763 ssf->ssf_info.sinfo_context = sp->context;
2764 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2765 ssf->ssf_assoc_id = sctp_get_associd(stcb);
2766 SCTP_BUF_NEXT(m_notify) = sp->data;
2767 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2768
2769 /* Steal off the mbuf */
2770 sp->data = NULL;
2771 /*
2772 * For this case, we check the actual socket buffer, since the assoc
2773 * is going away we don't want to overfill the socket buffer for a
2774 * non-reader
2775 */
2776 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2777 sctp_m_freem(m_notify);
2778 return;
2779 }
2780 /* append to socket */
2781 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2782 0, 0, 0, 0, 0, 0,
2783 m_notify);
2784 if (control == NULL) {
2785 /* no memory */
2786 sctp_m_freem(m_notify);
2787 return;
2788 }
2789 control->spec_flags = M_NOTIFICATION;
2790 sctp_add_to_readq(stcb->sctp_ep, stcb,
2791 control,
2792 &stcb->sctp_socket->so_rcv, 1);
2793}
2794
2795
2796
2797static void
2798sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
2799 uint32_t error)
2800{
2801 struct mbuf *m_notify;
2802 struct sctp_adaptation_event *sai;
2803 struct sctp_queued_to_read *control;
2804
2805 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
2806 /* event not enabled */
2807 return;
2808
2809	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaptation_event), 0, M_DONTWAIT, 1, MT_DATA);
2810 if (m_notify == NULL)
2811 /* no space left */
2812 return;
2813 SCTP_BUF_LEN(m_notify) = 0;
2814 sai = mtod(m_notify, struct sctp_adaptation_event *);
2815 sai->sai_type = SCTP_ADAPTATION_INDICATION;
2816 sai->sai_flags = 0;
2817 sai->sai_length = sizeof(struct sctp_adaptation_event);
2818 sai->sai_adaptation_ind = error;
2819 sai->sai_assoc_id = sctp_get_associd(stcb);
2820
2821 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2822 SCTP_BUF_NEXT(m_notify) = NULL;
2823
2824 /* append to socket */
2825 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2826 0, 0, 0, 0, 0, 0,
2827 m_notify);
2828 if (control == NULL) {
2829 /* no memory */
2830 sctp_m_freem(m_notify);
2831 return;
2832 }
2833 control->length = SCTP_BUF_LEN(m_notify);
2834 control->spec_flags = M_NOTIFICATION;
2835 /* not that we need this */
2836 control->tail_mbuf = m_notify;
2837 sctp_add_to_readq(stcb->sctp_ep, stcb,
2838 control,
2839 &stcb->sctp_socket->so_rcv, 1);
2840}
2841
2842/* This always must be called with the read-queue LOCKED in the INP */
2843void
2844sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
2845 uint32_t error, int nolock)
2846{
2847 struct mbuf *m_notify;
2848 struct sctp_pdapi_event *pdapi;
2849 struct sctp_queued_to_read *control;
2850 struct sockbuf *sb;
2851
2852 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
2853 /* event not enabled */
2854 return;
2855
2856 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
2857 if (m_notify == NULL)
2858 /* no space left */
2859 return;
2860 SCTP_BUF_LEN(m_notify) = 0;
2861 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
2862 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
2863 pdapi->pdapi_flags = 0;
2864 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
2865 pdapi->pdapi_indication = error;
2866 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
2867
2868 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
2869 SCTP_BUF_NEXT(m_notify) = NULL;
2870 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2871 0, 0, 0, 0, 0, 0,
2872 m_notify);
2873 if (control == NULL) {
2874 /* no memory */
2875 sctp_m_freem(m_notify);
2876 return;
2877 }
2878 control->spec_flags = M_NOTIFICATION;
2879 control->length = SCTP_BUF_LEN(m_notify);
2880 /* not that we need this */
2881 control->tail_mbuf = m_notify;
2882 control->held_length = 0;
2883 control->length = 0;
2884 if (nolock == 0) {
2885 SCTP_INP_READ_LOCK(stcb->sctp_ep);
2886 }
2887 sb = &stcb->sctp_socket->so_rcv;
2888#ifdef SCTP_SB_LOGGING
2889 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
2890#endif
2891 sctp_sballoc(stcb, sb, m_notify);
2892#ifdef SCTP_SB_LOGGING
2893 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
2894#endif
2895 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
2896 control->end_added = 1;
2897 if (stcb->asoc.control_pdapi)
2898 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
2899 else {
2900 /* we really should not see this case */
2901 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
2902 }
2903 if (nolock == 0) {
2904 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
2905 }
2906 if (stcb->sctp_ep && stcb->sctp_socket) {
2907 /* This should always be the case */
2908 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2909 }
2910}
2911
2912static void
2913sctp_notify_shutdown_event(struct sctp_tcb *stcb)
2914{
2915 struct mbuf *m_notify;
2916 struct sctp_shutdown_event *sse;
2917 struct sctp_queued_to_read *control;
2918
2919 /*
2920 * For TCP model AND UDP connected sockets we will send an error up
2921	 * when a SHUTDOWN completes
2922 */
2923 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2924 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2925 /* mark socket closed for read/write and wakeup! */
2926 socantsendmore(stcb->sctp_socket);
2927 }
2928 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
2929 /* event not enabled */
2930 return;
2931
2932 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
2933 if (m_notify == NULL)
2934 /* no space left */
2935 return;
2936 sse = mtod(m_notify, struct sctp_shutdown_event *);
2937 sse->sse_type = SCTP_SHUTDOWN_EVENT;
2938 sse->sse_flags = 0;
2939 sse->sse_length = sizeof(struct sctp_shutdown_event);
2940 sse->sse_assoc_id = sctp_get_associd(stcb);
2941
2942 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
2943 SCTP_BUF_NEXT(m_notify) = NULL;
2944
2945 /* append to socket */
2946 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2947 0, 0, 0, 0, 0, 0,
2948 m_notify);
2949 if (control == NULL) {
2950 /* no memory */
2951 sctp_m_freem(m_notify);
2952 return;
2953 }
2954 control->spec_flags = M_NOTIFICATION;
2955 control->length = SCTP_BUF_LEN(m_notify);
2956 /* not that we need this */
2957 control->tail_mbuf = m_notify;
2958 sctp_add_to_readq(stcb->sctp_ep, stcb,
2959 control,
2960 &stcb->sctp_socket->so_rcv, 1);
2961}
2962
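/*
 * build a SCTP_STREAM_RESET_EVENT for the given stream list (all streams
 * when number_entries is 0) and queue it on the read queue, provided the
 * user enabled the event.
 */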
2963static void
2964sctp_notify_stream_reset(struct sctp_tcb *stcb,
2965 int number_entries, uint16_t * list, int flag)
2966{
2967 struct mbuf *m_notify;
2968 struct sctp_queued_to_read *control;
2969 struct sctp_stream_reset_event *strreset;
2970 int len;
2971
2972 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2973 /* event not enabled */
2974 return;
2975
2976 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
2977 if (m_notify == NULL)
2978 /* no space left */
2979 return;
2980 SCTP_BUF_LEN(m_notify) = 0;
2981 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
2982 if (len > M_TRAILINGSPACE(m_notify)) {
2983 /* never enough room */
2984 sctp_m_freem(m_notify);
2985 return;
2986 }
2987 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
2988 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
2989 if (number_entries == 0) {
2990 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
2991 } else {
2992 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
2993 }
2994 strreset->strreset_length = len;
2995 strreset->strreset_assoc_id = sctp_get_associd(stcb);
2996 if (number_entries) {
2997 int i;
2998
2999 for (i = 0; i < number_entries; i++) {
3000 strreset->strreset_list[i] = ntohs(list[i]);
3001 }
3002 }
3003 SCTP_BUF_LEN(m_notify) = len;
3004 SCTP_BUF_NEXT(m_notify) = NULL;
3005 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3006 /* no space */
3007 sctp_m_freem(m_notify);
3008 return;
3009 }
3010 /* append to socket */
3011 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3012 0, 0, 0, 0, 0, 0,
3013 m_notify);
3014 if (control == NULL) {
3015 /* no memory */
3016 sctp_m_freem(m_notify);
3017 return;
3018 }
3019 control->spec_flags = M_NOTIFICATION;
3020 control->length = SCTP_BUF_LEN(m_notify);
3021 /* not that we need this */
3022 control->tail_mbuf = m_notify;
3023 sctp_add_to_readq(stcb->sctp_ep, stcb,
3024 control,
3025 &stcb->sctp_socket->so_rcv, 1);
3026}
3027
3028
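/*
 * dispatch a notification to the ULP: check that the socket is still
 * usable, then hand off to the matching sctp_notify_*() helper for the
 * given notification code.
 */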
3029void
3030sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3031 uint32_t error, void *data)
3032{
3033 if (stcb == NULL) {
3034		/* unlikely but possible */
3035 return;
3036 }
3037 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3038 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3039 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3040 ) {
3041 /* No notifications up when we are in a no socket state */
3042 return;
3043 }
3044 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3045 /* Can't send up to a closed socket any notifications */
3046 return;
3047 }
3048 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) {
3049 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) &&
3050 (notification != SCTP_NOTIFY_ASSOC_ABORTED) &&
3051 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) &&
3052 (notification != SCTP_NOTIFY_DG_FAIL) &&
3053 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) {
3054 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL);
3055 stcb->asoc.assoc_up_sent = 1;
3056 }
3057 }
3058 switch (notification) {
3059 case SCTP_NOTIFY_ASSOC_UP:
3060 if (stcb->asoc.assoc_up_sent == 0) {
3061 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL);
3062 stcb->asoc.assoc_up_sent = 1;
3063 }
3064 break;
3065 case SCTP_NOTIFY_ASSOC_DOWN:
3066 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL);
3067 break;
3068 case SCTP_NOTIFY_INTERFACE_DOWN:
3069 {
3070 struct sctp_nets *net;
3071
3072 net = (struct sctp_nets *)data;
3073 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3074 (struct sockaddr *)&net->ro._l_addr, error);
3075 break;
3076 }
3077 case SCTP_NOTIFY_INTERFACE_UP:
3078 {
3079 struct sctp_nets *net;
3080
3081 net = (struct sctp_nets *)data;
3082 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3083 (struct sockaddr *)&net->ro._l_addr, error);
3084 break;
3085 }
3086 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3087 {
3088 struct sctp_nets *net;
3089
3090 net = (struct sctp_nets *)data;
3091 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3092 (struct sockaddr *)&net->ro._l_addr, error);
3093 break;
3094 }
3095 case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3096 sctp_notify_send_failed2(stcb, error,
3097 (struct sctp_stream_queue_pending *)data);
3098 break;
3099 case SCTP_NOTIFY_DG_FAIL:
3100 sctp_notify_send_failed(stcb, error,
3101 (struct sctp_tmit_chunk *)data);
3102 break;
3103 case SCTP_NOTIFY_ADAPTATION_INDICATION:
3104 /* Here the error is the adaptation indication */
3105 sctp_notify_adaptation_layer(stcb, error);
3106 break;
3107 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3108 sctp_notify_partial_delivery_indication(stcb, error, 0);
3109 break;
3110 case SCTP_NOTIFY_STRDATA_ERR:
3111 break;
3112 case SCTP_NOTIFY_ASSOC_ABORTED:
3113 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL);
3114 break;
3115 case SCTP_NOTIFY_PEER_OPENED_STREAM:
3116 break;
3117 case SCTP_NOTIFY_STREAM_OPENED_OK:
3118 break;
3119 case SCTP_NOTIFY_ASSOC_RESTART:
3120 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data);
3121 break;
3122 case SCTP_NOTIFY_HB_RESP:
3123 break;
3124 case SCTP_NOTIFY_STR_RESET_SEND:
3125 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3126 break;
3127 case SCTP_NOTIFY_STR_RESET_RECV:
3128 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3129 break;
3130 case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3131 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR));
3132 break;
3133
3134 case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3135		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3136 break;
3137
3138 case SCTP_NOTIFY_ASCONF_ADD_IP:
3139 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3140 error);
3141 break;
3142 case SCTP_NOTIFY_ASCONF_DELETE_IP:
3143 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3144 error);
3145 break;
3146 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3147 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3148 error);
3149 break;
3150 case SCTP_NOTIFY_ASCONF_SUCCESS:
3151 break;
3152 case SCTP_NOTIFY_ASCONF_FAILED:
3153 break;
3154 case SCTP_NOTIFY_PEER_SHUTDOWN:
3155 sctp_notify_shutdown_event(stcb);
3156 break;
3157 case SCTP_NOTIFY_AUTH_NEW_KEY:
3158 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3159 (uint16_t) (uintptr_t) data);
3160 break;
3161#if 0
3162 case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3163 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3164 error, (uint16_t) (uintptr_t) data);
3165 break;
3166#endif /* not yet? remove? */
3167
3168
3169 default:
3170#ifdef SCTP_DEBUG
3171 if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
3172 printf("NOTIFY: unknown notification %xh (%u)\n",
3173 notification, notification);
3174 }
3175#endif /* SCTP_DEBUG */
3176 break;
3177 } /* end switch */
3178}
3179
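/*
 * flush all queued, sent and unsent data for the association, telling the
 * ULP about each failed datagram; grabs the TCB send lock unless the
 * caller says it already holds it.
 */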
3180void
3181sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock)
3182{
3183 struct sctp_association *asoc;
3184 struct sctp_stream_out *outs;
3185 struct sctp_tmit_chunk *chk;
3186 struct sctp_stream_queue_pending *sp;
3187 int i;
3188
3189 asoc = &stcb->asoc;
3190
3191 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3192 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3193 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3194 return;
3195 }
3196	/* now go through all the gunk, freeing chunks */
3197 if (holds_lock == 0)
3198 SCTP_TCB_SEND_LOCK(stcb);
3199 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3200 /* For each stream */
3201 outs = &stcb->asoc.strmout[i];
3202 /* clean up any sends there */
3203 stcb->asoc.locked_on_sending = NULL;
3204 sp = TAILQ_FIRST(&outs->outqueue);
3205 while (sp) {
3206 stcb->asoc.stream_queue_cnt--;
3207 TAILQ_REMOVE(&outs->outqueue, sp, next);
3208 sctp_free_spbufspace(stcb, asoc, sp);
3209 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3210 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp);
3211 if (sp->data) {
3212 sctp_m_freem(sp->data);
3213 sp->data = NULL;
3214 }
3215 if (sp->net)
3216 sctp_free_remote_addr(sp->net);
3217 sp->net = NULL;
3218 /* Free the chunk */
3219 sctp_free_a_strmoq(stcb, sp);
3220 sp = TAILQ_FIRST(&outs->outqueue);
3221 }
3222 }
3223
3224 /* pending send queue SHOULD be empty */
3225 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3226 chk = TAILQ_FIRST(&asoc->send_queue);
3227 while (chk) {
3228 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3229 asoc->send_queue_cnt--;
3230 if (chk->data) {
3231 /*
3232				 * trim off the sctp chunk header (it should
3233				 * be there)
3234 */
3235 if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3236 m_adj(chk->data, sizeof(struct sctp_data_chunk));
3237 sctp_mbuf_crush(chk->data);
3238 }
3239 }
3240 sctp_free_bufspace(stcb, asoc, chk, 1);
3241 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
3242 if (chk->data) {
3243 sctp_m_freem(chk->data);
3244 chk->data = NULL;
3245 }
3246 if (chk->whoTo)
3247 sctp_free_remote_addr(chk->whoTo);
3248 chk->whoTo = NULL;
3249 sctp_free_a_chunk(stcb, chk);
3250 chk = TAILQ_FIRST(&asoc->send_queue);
3251 }
3252 }
3253 /* sent queue SHOULD be empty */
3254 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3255 chk = TAILQ_FIRST(&asoc->sent_queue);
3256 while (chk) {
3257 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3258 asoc->sent_queue_cnt--;
3259 if (chk->data) {
3260 /*
3261				 * trim off the sctp chunk header (it should
3262				 * be there)
3263 */
3264 if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3265 m_adj(chk->data, sizeof(struct sctp_data_chunk));
3266 sctp_mbuf_crush(chk->data);
3267 }
3268 }
3269 sctp_free_bufspace(stcb, asoc, chk, 1);
3270 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3271 SCTP_NOTIFY_DATAGRAM_SENT, chk);
3272 if (chk->data) {
3273 sctp_m_freem(chk->data);
3274 chk->data = NULL;
3275 }
3276 if (chk->whoTo)
3277 sctp_free_remote_addr(chk->whoTo);
3278 chk->whoTo = NULL;
3279 sctp_free_a_chunk(stcb, chk);
3280 chk = TAILQ_FIRST(&asoc->sent_queue);
3281 }
3282 }
3283 if (holds_lock == 0)
3284 SCTP_TCB_SEND_UNLOCK(stcb);
3285}
3286
3287void
3288sctp_abort_notification(struct sctp_tcb *stcb, int error)
3289{
3290
3291 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3292 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3293 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3294 return;
3295 }
3296 /* Tell them we lost the asoc */
3297 sctp_report_all_outbound(stcb, 1);
3298 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3299 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3300 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3301 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3302 }
3303 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
3304}
3305
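/*
 * abort in response to an inbound packet: notify the ULP if we have a TCB,
 * send an ABORT using the peer's vtag, then free the association (or the
 * endpoint, if it is already marked gone and has no associations left).
 */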
3306void
3307sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3308 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
3309{
3310 uint32_t vtag;
3311
3312 vtag = 0;
3313 if (stcb != NULL) {
3314 /* We have a TCB to abort, send notification too */
3315 vtag = stcb->asoc.peer_vtag;
3316 sctp_abort_notification(stcb, 0);
3317 }
3318 sctp_send_abort(m, iphlen, sh, vtag, op_err);
3319 if (stcb != NULL) {
3320 /* Ok, now lets free it */
3321 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3322 } else {
3323 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3324 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3325 sctp_inpcb_free(inp, 1, 0);
3326 }
3327 }
3328 }
3329}
3330
3331void
3332sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3333 int error, struct mbuf *op_err)
3334{
3335 uint32_t vtag;
3336
3337 if (stcb == NULL) {
3338 /* Got to have a TCB */
3339 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3340 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3341 sctp_inpcb_free(inp, 1, 0);
3342 }
3343 }
3344 return;
3345 }
3346 vtag = stcb->asoc.peer_vtag;
3347 /* notify the ulp */
3348 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3349 sctp_abort_notification(stcb, error);
3350 /* notify the peer */
3351 sctp_send_abort_tcb(stcb, op_err);
3352 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3353 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3354 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3355 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3356 }
3357 /* now free the asoc */
3358 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3359}
3360
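/*
 * handle an out-of-the-blue packet: ABORT, SHUTDOWN-COMPLETE and
 * packet-dropped chunks get no reply, a SHUTDOWN-ACK gets a
 * SHUTDOWN-COMPLETE, anything else gets an ABORT back.
 */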
3361void
3362sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3363 struct sctp_inpcb *inp, struct mbuf *op_err)
3364{
3365 struct sctp_chunkhdr *ch, chunk_buf;
3366 unsigned int chk_length;
3367
3368 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3369 /* Generate a TO address for future reference */
3370 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3371 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3372 sctp_inpcb_free(inp, 1, 0);
3373 }
3374 }
3375 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3376 sizeof(*ch), (uint8_t *) & chunk_buf);
3377 while (ch != NULL) {
3378 chk_length = ntohs(ch->chunk_length);
3379 if (chk_length < sizeof(*ch)) {
3380 /* break to abort land */
3381 break;
3382 }
3383 switch (ch->chunk_type) {
3384 case SCTP_PACKET_DROPPED:
3385 /* we don't respond to pkt-dropped */
3386 return;
3387 case SCTP_ABORT_ASSOCIATION:
3388 /* we don't respond with an ABORT to an ABORT */
3389 return;
3390 case SCTP_SHUTDOWN_COMPLETE:
3391 /*
3392 * we ignore it since we are not waiting for it and
3393 * peer is gone
3394 */
3395 return;
3396 case SCTP_SHUTDOWN_ACK:
3397 sctp_send_shutdown_complete2(m, iphlen, sh);
3398 return;
3399 default:
3400 break;
3401 }
3402 offset += SCTP_SIZE32(chk_length);
3403 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3404 sizeof(*ch), (uint8_t *) & chunk_buf);
3405 }
3406 sctp_send_abort(m, iphlen, sh, 0, op_err);
3407}
3408
3409/*
3410 * check the inbound datagram to make sure there is not an abort inside it,
3411 * if there is return 1, else return 0.
3412 */
3413int
3414sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3415{
3416 struct sctp_chunkhdr *ch;
3417 struct sctp_init_chunk *init_chk, chunk_buf;
3418 int offset;
3419 unsigned int chk_length;
3420
3421 offset = iphlen + sizeof(struct sctphdr);
3422 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3423 (uint8_t *) & chunk_buf);
3424 while (ch != NULL) {
3425 chk_length = ntohs(ch->chunk_length);
3426 if (chk_length < sizeof(*ch)) {
3427 /* packet is probably corrupt */
3428 break;
3429 }
3430 /* we seem to be ok, is it an abort? */
3431 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3432 /* yep, tell them */
3433 return (1);
3434 }
3435 if (ch->chunk_type == SCTP_INITIATION) {
3436 /* need to update the Vtag */
3437 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3438 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3439 if (init_chk != NULL) {
3440 *vtagfill = ntohl(init_chk->init.initiate_tag);
3441 }
3442 }
3443 /* Nope, move to the next chunk */
3444 offset += SCTP_SIZE32(chk_length);
3445 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3446 sizeof(*ch), (uint8_t *) & chunk_buf);
3447 }
3448 return (0);
3449}
3450
3451/*
3452 * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
3453 * set (i.e. it's 0), so this function compares link-local scopes
3454 */
3455uint32_t
3456sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3457{
3458 struct sockaddr_in6 a, b;
3459
3460 /* save copies */
3461 a = *addr1;
3462 b = *addr2;
3463
3464 if (a.sin6_scope_id == 0)
3465 if (sa6_recoverscope(&a)) {
3466 /* can't get scope, so can't match */
3467 return (0);
3468 }
3469 if (b.sin6_scope_id == 0)
3470 if (sa6_recoverscope(&b)) {
3471 /* can't get scope, so can't match */
3472 return (0);
3473 }
3474 if (a.sin6_scope_id != b.sin6_scope_id)
3475 return (0);
3476
3477 return (1);
3478}
3479
3480/*
3481 * returns a sockaddr_in6 with embedded scope recovered and removed
3482 */
3483struct sockaddr_in6 *
3484sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
3485{
3486 /* check and strip embedded scope junk */
3487 if (addr->sin6_family == AF_INET6) {
3488 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
3489 if (addr->sin6_scope_id == 0) {
3490 *store = *addr;
3491 if (!sa6_recoverscope(store)) {
3492 /* use the recovered scope */
3493 addr = store;
3494 }
3495 } else {
3496 /* else, return the original "to" addr */
3497 in6_clearscope(&addr->sin6_addr);
3498 }
3499 }
3500 }
3501 return (addr);
3502}
3503
3504/*
3505 * are the two addresses the same? currently a "scopeless" check returns: 1
3506 * if same, 0 if not
3507 */
3508__inline int
3509sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3510{
3511
3512 /* must be valid */
3513 if (sa1 == NULL || sa2 == NULL)
3514 return (0);
3515
3516 /* must be the same family */
3517 if (sa1->sa_family != sa2->sa_family)
3518 return (0);
3519
3520 if (sa1->sa_family == AF_INET6) {
3521 /* IPv6 addresses */
3522 struct sockaddr_in6 *sin6_1, *sin6_2;
3523
3524 sin6_1 = (struct sockaddr_in6 *)sa1;
3525 sin6_2 = (struct sockaddr_in6 *)sa2;
3526 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3527 &sin6_2->sin6_addr));
3528 } else if (sa1->sa_family == AF_INET) {
3529 /* IPv4 addresses */
3530 struct sockaddr_in *sin_1, *sin_2;
3531
3532 sin_1 = (struct sockaddr_in *)sa1;
3533 sin_2 = (struct sockaddr_in *)sa2;
3534 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3535 } else {
3536 /* we don't do these... */
3537 return (0);
3538 }
3539}
3540
3541void
3542sctp_print_address(struct sockaddr *sa)
3543{
3544
3545 if (sa->sa_family == AF_INET6) {
3546 struct sockaddr_in6 *sin6;
3547 char ip6buf[INET6_ADDRSTRLEN];
3548
3549 sin6 = (struct sockaddr_in6 *)sa;
3550 printf("IPv6 address: %s:%d scope:%u\n",
3551 ip6_sprintf(ip6buf, &sin6->sin6_addr),
3552 ntohs(sin6->sin6_port),
3553 sin6->sin6_scope_id);
3554 } else if (sa->sa_family == AF_INET) {
3555 struct sockaddr_in *sin;
3556 unsigned char *p;
3557
3558 sin = (struct sockaddr_in *)sa;
3559 p = (unsigned char *)&sin->sin_addr;
3560 printf("IPv4 address: %u.%u.%u.%u:%d\n",
3561 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
3562 } else {
3563 printf("?\n");
3564 }
3565}
3566
3567void
3568sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3569{
3570 if (iph->ip_v == IPVERSION) {
3571 struct sockaddr_in lsa, fsa;
3572
3573 bzero(&lsa, sizeof(lsa));
3574 lsa.sin_len = sizeof(lsa);
3575 lsa.sin_family = AF_INET;
3576 lsa.sin_addr = iph->ip_src;
3577 lsa.sin_port = sh->src_port;
3578 bzero(&fsa, sizeof(fsa));
3579 fsa.sin_len = sizeof(fsa);
3580 fsa.sin_family = AF_INET;
3581 fsa.sin_addr = iph->ip_dst;
3582 fsa.sin_port = sh->dest_port;
3583 printf("src: ");
3584 sctp_print_address((struct sockaddr *)&lsa);
3585 printf("dest: ");
3586 sctp_print_address((struct sockaddr *)&fsa);
3587 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3588 struct ip6_hdr *ip6;
3589 struct sockaddr_in6 lsa6, fsa6;
3590
3591 ip6 = (struct ip6_hdr *)iph;
3592 bzero(&lsa6, sizeof(lsa6));
3593 lsa6.sin6_len = sizeof(lsa6);
3594 lsa6.sin6_family = AF_INET6;
3595 lsa6.sin6_addr = ip6->ip6_src;
3596 lsa6.sin6_port = sh->src_port;
3597 bzero(&fsa6, sizeof(fsa6));
3598 fsa6.sin6_len = sizeof(fsa6);
3599 fsa6.sin6_family = AF_INET6;
3600 fsa6.sin6_addr = ip6->ip6_dst;
3601 fsa6.sin6_port = sh->dest_port;
3602 printf("src: ");
3603 sctp_print_address((struct sockaddr *)&lsa6);
3604 printf("dest: ");
3605 sctp_print_address((struct sockaddr *)&fsa6);
3606 }
3607}
3608
3609void
3610sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
3611 struct sctp_inpcb *new_inp,
3612 struct sctp_tcb *stcb)
3613{
3614 /*
3615 * go through our old INP and pull off any control structures that
3616	 * belong to stcb and move them to the new inp.
3617 */
3618 struct socket *old_so, *new_so;
3619 struct sctp_queued_to_read *control, *nctl;
3620 struct sctp_readhead tmp_queue;
3621 struct mbuf *m;
3622 int error;
3623
3624 old_so = old_inp->sctp_socket;
3625 new_so = new_inp->sctp_socket;
3626 TAILQ_INIT(&tmp_queue);
3627
3628 SOCKBUF_LOCK(&(old_so->so_rcv));
3629
3630 error = sblock(&old_so->so_rcv, 0);
3631
3632 SOCKBUF_UNLOCK(&(old_so->so_rcv));
3633 if (error) {
3634 /*
3635		 * Gak, can't get sblock, we have a problem. Data will be
3636		 * left stranded, and we don't dare look at it since the
3637		 * other thread may be reading something. Oh well, it's a
3638		 * screwed up app that does a peeloff OR an accept while
3639		 * reading from the main socket... actually it's only the
3640		 * peeloff() case, since I think read will fail on a
3641		 * listening socket.
3642 */
3643 return;
3644 }
3645 /* lock the socket buffers */
3646 SCTP_INP_READ_LOCK(old_inp);
3647 control = TAILQ_FIRST(&old_inp->read_queue);
3648	/* Pull off all for our target stcb */
3649 while (control) {
3650 nctl = TAILQ_NEXT(control, next);
3651 if (control->stcb == stcb) {
3652 /* remove it we want it */
3653 TAILQ_REMOVE(&old_inp->read_queue, control, next);
3654 TAILQ_INSERT_TAIL(&tmp_queue, control, next);
3655 m = control->data;
3656 while (m) {
3657#ifdef SCTP_SB_LOGGING
3658 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
3659#endif
3660 sctp_sbfree(control, stcb, &old_so->so_rcv, m);
3661#ifdef SCTP_SB_LOGGING
3662 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3663#endif
3664 m = SCTP_BUF_NEXT(m);
3665 }
3666 }
3667 control = nctl;
3668 }
3669 SCTP_INP_READ_UNLOCK(old_inp);
3670
3671 /* Remove the sb-lock on the old socket */
3672 SOCKBUF_LOCK(&(old_so->so_rcv));
3673
3674 sbunlock(&old_so->so_rcv);
3675 SOCKBUF_UNLOCK(&(old_so->so_rcv));
3676
3677 /* Now we move them over to the new socket buffer */
3678 control = TAILQ_FIRST(&tmp_queue);
3679 SCTP_INP_READ_LOCK(new_inp);
3680 while (control) {
3681 nctl = TAILQ_NEXT(control, next);
3682 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
3683 m = control->data;
3684 while (m) {
3685#ifdef SCTP_SB_LOGGING
3686 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
3687#endif
3688 sctp_sballoc(stcb, &new_so->so_rcv, m);
3689#ifdef SCTP_SB_LOGGING
3690 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3691#endif
3692 m = SCTP_BUF_NEXT(m);
3693 }
3694 control = nctl;
3695 }
3696 SCTP_INP_READ_UNLOCK(new_inp);
3697}
3698
3699
3700void
3701sctp_add_to_readq(struct sctp_inpcb *inp,
3702 struct sctp_tcb *stcb,
3703 struct sctp_queued_to_read *control,
3704 struct sockbuf *sb,
3705 int end)
3706{
3707 /*
3708 * Here we must place the control on the end of the socket read
3709 * queue AND increment sb_cc so that select will work properly on
3710 * read.
3711 */
3712 struct mbuf *m, *prev = NULL;
3713
3714 if (inp == NULL) {
3715 /* Gak, TSNH!! */
3716#ifdef INVARIANTS
3717 panic("Gak, inp NULL on add_to_readq");
3718#endif
3719 return;
3720 }
3721 SCTP_INP_READ_LOCK(inp);
2811 }
2812 }
2813 spc->spc_state = state;
2814 spc->spc_error = error;
2815 spc->spc_assoc_id = sctp_get_associd(stcb);
2816
2817 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2818 SCTP_BUF_NEXT(m_notify) = NULL;
2819
2820 /* append to socket */
2821 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2822 0, 0, 0, 0, 0, 0,
2823 m_notify);
2824 if (control == NULL) {
2825 /* no memory */
2826 sctp_m_freem(m_notify);
2827 return;
2828 }
2829 control->length = SCTP_BUF_LEN(m_notify);
2830 control->spec_flags = M_NOTIFICATION;
2831 /* not that we need this */
2832 control->tail_mbuf = m_notify;
2833 sctp_add_to_readq(stcb->sctp_ep, stcb,
2834 control,
2835 &stcb->sctp_socket->so_rcv, 1);
2836}
2837
2838
2839static void
2840sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
2841 struct sctp_tmit_chunk *chk)
2842{
2843 struct mbuf *m_notify;
2844 struct sctp_send_failed *ssf;
2845 struct sctp_queued_to_read *control;
2846 int length;
2847
2848 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2849 /* event not enabled */
2850 return;
2851
2852 length = sizeof(struct sctp_send_failed) + chk->send_size;
2853 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2854 if (m_notify == NULL)
2855 /* no space left */
2856 return;
2857 SCTP_BUF_LEN(m_notify) = 0;
2858 ssf = mtod(m_notify, struct sctp_send_failed *);
2859 ssf->ssf_type = SCTP_SEND_FAILED;
2860 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2861 ssf->ssf_flags = SCTP_DATA_UNSENT;
2862 else
2863 ssf->ssf_flags = SCTP_DATA_SENT;
2864 ssf->ssf_length = length;
2865 ssf->ssf_error = error;
2866 /* not exactly what the user sent in, but should be close :) */
2867 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2868 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2869 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2870 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2871 ssf->ssf_info.sinfo_context = chk->rec.data.context;
2872 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2873 ssf->ssf_assoc_id = sctp_get_associd(stcb);
2874 SCTP_BUF_NEXT(m_notify) = chk->data;
2875 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2876
2877 /* Steal off the mbuf */
2878 chk->data = NULL;
2879 /*
2880 * For this case, we check the actual socket buffer, since the assoc
2881 * is going away we don't want to overfill the socket buffer for a
2882 * non-reader
2883 */
2884 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2885 sctp_m_freem(m_notify);
2886 return;
2887 }
2888 /* append to socket */
2889 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2890 0, 0, 0, 0, 0, 0,
2891 m_notify);
2892 if (control == NULL) {
2893 /* no memory */
2894 sctp_m_freem(m_notify);
2895 return;
2896 }
2897 control->spec_flags = M_NOTIFICATION;
2898 sctp_add_to_readq(stcb->sctp_ep, stcb,
2899 control,
2900 &stcb->sctp_socket->so_rcv, 1);
2901}
2902
2903
2904static void
2905sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
2906 struct sctp_stream_queue_pending *sp)
2907{
2908 struct mbuf *m_notify;
2909 struct sctp_send_failed *ssf;
2910 struct sctp_queued_to_read *control;
2911 int length;
2912
2913 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
2914 /* event not enabled */
2915 return;
2916
2917 length = sizeof(struct sctp_send_failed) + sp->length;
2918 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2919 if (m_notify == NULL)
2920 /* no space left */
2921 return;
2922 SCTP_BUF_LEN(m_notify) = 0;
2923 ssf = mtod(m_notify, struct sctp_send_failed *);
2924 ssf->ssf_type = SCTP_SEND_FAILED;
2925 if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2926 ssf->ssf_flags = SCTP_DATA_UNSENT;
2927 else
2928 ssf->ssf_flags = SCTP_DATA_SENT;
2929 ssf->ssf_length = length;
2930 ssf->ssf_error = error;
2931 /* not exactly what the user sent in, but should be close :) */
2932 ssf->ssf_info.sinfo_stream = sp->stream;
2933 ssf->ssf_info.sinfo_ssn = sp->strseq;
2934 ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
2935 ssf->ssf_info.sinfo_ppid = sp->ppid;
2936 ssf->ssf_info.sinfo_context = sp->context;
2937 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2938 ssf->ssf_assoc_id = sctp_get_associd(stcb);
2939 SCTP_BUF_NEXT(m_notify) = sp->data;
2940 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2941
2942 /* Steal off the mbuf */
2943 sp->data = NULL;
2944 /*
2945 * For this case, we check the actual socket buffer, since the assoc
2946 * is going away we don't want to overfill the socket buffer for a
2947 * non-reader
2948 */
2949 if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2950 sctp_m_freem(m_notify);
2951 return;
2952 }
2953 /* append to socket */
2954 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2955 0, 0, 0, 0, 0, 0,
2956 m_notify);
2957 if (control == NULL) {
2958 /* no memory */
2959 sctp_m_freem(m_notify);
2960 return;
2961 }
2962 control->spec_flags = M_NOTIFICATION;
2963 sctp_add_to_readq(stcb->sctp_ep, stcb,
2964 control,
2965 &stcb->sctp_socket->so_rcv, 1);
2966}
2967
2968
2969
2970static void
2971sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
2972 uint32_t error)
2973{
2974 struct mbuf *m_notify;
2975 struct sctp_adaptation_event *sai;
2976 struct sctp_queued_to_read *control;
2977
2978 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
2979 /* event not enabled */
2980 return;
2981
2982 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2983 if (m_notify == NULL)
2984 /* no space left */
2985 return;
2986 SCTP_BUF_LEN(m_notify) = 0;
2987 sai = mtod(m_notify, struct sctp_adaptation_event *);
2988 sai->sai_type = SCTP_ADAPTATION_INDICATION;
2989 sai->sai_flags = 0;
2990 sai->sai_length = sizeof(struct sctp_adaptation_event);
2991 sai->sai_adaptation_ind = error;
2992 sai->sai_assoc_id = sctp_get_associd(stcb);
2993
2994 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2995 SCTP_BUF_NEXT(m_notify) = NULL;
2996
2997 /* append to socket */
2998 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2999 0, 0, 0, 0, 0, 0,
3000 m_notify);
3001 if (control == NULL) {
3002 /* no memory */
3003 sctp_m_freem(m_notify);
3004 return;
3005 }
3006 control->length = SCTP_BUF_LEN(m_notify);
3007 control->spec_flags = M_NOTIFICATION;
3008 /* not that we need this */
3009 control->tail_mbuf = m_notify;
3010 sctp_add_to_readq(stcb->sctp_ep, stcb,
3011 control,
3012 &stcb->sctp_socket->so_rcv, 1);
3013}
3014
3015/* This always must be called with the read-queue LOCKED in the INP */
3016void
3017sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb,
3018 uint32_t error, int nolock)
3019{
3020 struct mbuf *m_notify;
3021 struct sctp_pdapi_event *pdapi;
3022 struct sctp_queued_to_read *control;
3023 struct sockbuf *sb;
3024
3025 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3026 /* event not enabled */
3027 return;
3028
3029 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3030 if (m_notify == NULL)
3031 /* no space left */
3032 return;
3033 SCTP_BUF_LEN(m_notify) = 0;
3034 pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3035 pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3036 pdapi->pdapi_flags = 0;
3037 pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3038 pdapi->pdapi_indication = error;
3039 pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3040
3041 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3042 SCTP_BUF_NEXT(m_notify) = NULL;
3043 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3044 0, 0, 0, 0, 0, 0,
3045 m_notify);
3046 if (control == NULL) {
3047 /* no memory */
3048 sctp_m_freem(m_notify);
3049 return;
3050 }
3051 control->spec_flags = M_NOTIFICATION;
3052 control->length = SCTP_BUF_LEN(m_notify);
3053 /* not that we need this */
3054 control->tail_mbuf = m_notify;
3055 control->held_length = 0;
3056 control->length = 0;
3057 if (nolock == 0) {
3058 SCTP_INP_READ_LOCK(stcb->sctp_ep);
3059 }
3060 sb = &stcb->sctp_socket->so_rcv;
3061#ifdef SCTP_SB_LOGGING
3062 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3063#endif
3064 sctp_sballoc(stcb, sb, m_notify);
3065#ifdef SCTP_SB_LOGGING
3066 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3067#endif
3068 atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3069 control->end_added = 1;
3070 if (stcb->asoc.control_pdapi)
3071 TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3072 else {
3073 /* we really should not see this case */
3074 TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3075 }
3076 if (nolock == 0) {
3077 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3078 }
3079 if (stcb->sctp_ep && stcb->sctp_socket) {
3080 /* This should always be the case */
3081 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3082 }
3083}
3084
3085static void
3086sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3087{
3088 struct mbuf *m_notify;
3089 struct sctp_shutdown_event *sse;
3090 struct sctp_queued_to_read *control;
3091
3092 /*
3093 * For TCP model AND UDP connected sockets we will send an error up
3094 * when an SHUTDOWN completes
3095 */
3096 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3097 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3098 /* mark socket closed for read/write and wakeup! */
3099 socantsendmore(stcb->sctp_socket);
3100 }
3101 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
3102 /* event not enabled */
3103 return;
3104
3105 m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
3106 if (m_notify == NULL)
3107 /* no space left */
3108 return;
3109 sse = mtod(m_notify, struct sctp_shutdown_event *);
3110 sse->sse_type = SCTP_SHUTDOWN_EVENT;
3111 sse->sse_flags = 0;
3112 sse->sse_length = sizeof(struct sctp_shutdown_event);
3113 sse->sse_assoc_id = sctp_get_associd(stcb);
3114
3115 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3116 SCTP_BUF_NEXT(m_notify) = NULL;
3117
3118 /* append to socket */
3119 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3120 0, 0, 0, 0, 0, 0,
3121 m_notify);
3122 if (control == NULL) {
3123 /* no memory */
3124 sctp_m_freem(m_notify);
3125 return;
3126 }
3127 control->spec_flags = M_NOTIFICATION;
3128 control->length = SCTP_BUF_LEN(m_notify);
3129 /* not that we need this */
3130 control->tail_mbuf = m_notify;
3131 sctp_add_to_readq(stcb->sctp_ep, stcb,
3132 control,
3133 &stcb->sctp_socket->so_rcv, 1);
3134}
3135
3136static void
3137sctp_notify_stream_reset(struct sctp_tcb *stcb,
3138 int number_entries, uint16_t * list, int flag)
3139{
3140 struct mbuf *m_notify;
3141 struct sctp_queued_to_read *control;
3142 struct sctp_stream_reset_event *strreset;
3143 int len;
3144
3145 if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
3146 /* event not enabled */
3147 return;
3148
3149 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3150 if (m_notify == NULL)
3151 /* no space left */
3152 return;
3153 SCTP_BUF_LEN(m_notify) = 0;
3154 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3155 if (len > M_TRAILINGSPACE(m_notify)) {
3156 /* never enough room */
3157 sctp_m_freem(m_notify);
3158 return;
3159 }
3160 strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3161 strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3162 if (number_entries == 0) {
3163 strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3164 } else {
3165 strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3166 }
3167 strreset->strreset_length = len;
3168 strreset->strreset_assoc_id = sctp_get_associd(stcb);
3169 if (number_entries) {
3170 int i;
3171
3172 for (i = 0; i < number_entries; i++) {
3173 strreset->strreset_list[i] = ntohs(list[i]);
3174 }
3175 }
3176 SCTP_BUF_LEN(m_notify) = len;
3177 SCTP_BUF_NEXT(m_notify) = NULL;
3178 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3179 /* no space */
3180 sctp_m_freem(m_notify);
3181 return;
3182 }
3183 /* append to socket */
3184 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3185 0, 0, 0, 0, 0, 0,
3186 m_notify);
3187 if (control == NULL) {
3188 /* no memory */
3189 sctp_m_freem(m_notify);
3190 return;
3191 }
3192 control->spec_flags = M_NOTIFICATION;
3193 control->length = SCTP_BUF_LEN(m_notify);
3194 /* not that we need this */
3195 control->tail_mbuf = m_notify;
3196 sctp_add_to_readq(stcb->sctp_ep, stcb,
3197 control,
3198 &stcb->sctp_socket->so_rcv, 1);
3199}
3200
3201
3202void
3203sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3204 uint32_t error, void *data)
3205{
3206 if (stcb == NULL) {
3207 /* unlikely but */
3208 return;
3209 }
3210 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3211 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3212 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3213 ) {
3214 /* No notifications up when we are in a no socket state */
3215 return;
3216 }
3217 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3218 /* Can't send up to a closed socket any notifications */
3219 return;
3220 }
3221 if (stcb && (stcb->asoc.assoc_up_sent == 0) && (notification != SCTP_NOTIFY_ASSOC_UP)) {
3222 if ((notification != SCTP_NOTIFY_ASSOC_DOWN) &&
3223 (notification != SCTP_NOTIFY_ASSOC_ABORTED) &&
3224 (notification != SCTP_NOTIFY_SPECIAL_SP_FAIL) &&
3225 (notification != SCTP_NOTIFY_DG_FAIL) &&
3226 (notification != SCTP_NOTIFY_PEER_SHUTDOWN)) {
3227 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, 0, NULL);
3228 stcb->asoc.assoc_up_sent = 1;
3229 }
3230 }
3231 switch (notification) {
3232 case SCTP_NOTIFY_ASSOC_UP:
3233 if (stcb->asoc.assoc_up_sent == 0) {
3234 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL);
3235 stcb->asoc.assoc_up_sent = 1;
3236 }
3237 break;
3238 case SCTP_NOTIFY_ASSOC_DOWN:
3239 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL);
3240 break;
3241 case SCTP_NOTIFY_INTERFACE_DOWN:
3242 {
3243 struct sctp_nets *net;
3244
3245 net = (struct sctp_nets *)data;
3246 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3247 (struct sockaddr *)&net->ro._l_addr, error);
3248 break;
3249 }
3250 case SCTP_NOTIFY_INTERFACE_UP:
3251 {
3252 struct sctp_nets *net;
3253
3254 net = (struct sctp_nets *)data;
3255 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3256 (struct sockaddr *)&net->ro._l_addr, error);
3257 break;
3258 }
3259 case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3260 {
3261 struct sctp_nets *net;
3262
3263 net = (struct sctp_nets *)data;
3264 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3265 (struct sockaddr *)&net->ro._l_addr, error);
3266 break;
3267 }
3268 case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3269 sctp_notify_send_failed2(stcb, error,
3270 (struct sctp_stream_queue_pending *)data);
3271 break;
3272 case SCTP_NOTIFY_DG_FAIL:
3273 sctp_notify_send_failed(stcb, error,
3274 (struct sctp_tmit_chunk *)data);
3275 break;
3276 case SCTP_NOTIFY_ADAPTATION_INDICATION:
3277 /* Here the error is the adaptation indication */
3278 sctp_notify_adaptation_layer(stcb, error);
3279 break;
3280 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3281 sctp_notify_partial_delivery_indication(stcb, error, 0);
3282 break;
3283 case SCTP_NOTIFY_STRDATA_ERR:
3284 break;
3285 case SCTP_NOTIFY_ASSOC_ABORTED:
3286 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL);
3287 break;
3288 case SCTP_NOTIFY_PEER_OPENED_STREAM:
3289 break;
3290 case SCTP_NOTIFY_STREAM_OPENED_OK:
3291 break;
3292 case SCTP_NOTIFY_ASSOC_RESTART:
3293 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data);
3294 break;
3295 case SCTP_NOTIFY_HB_RESP:
3296 break;
3297 case SCTP_NOTIFY_STR_RESET_SEND:
3298 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3299 break;
3300 case SCTP_NOTIFY_STR_RESET_RECV:
3301 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3302 break;
3303 case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3304 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_INBOUND_STR));
3305 break;
3306
3307 case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3308 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_INBOUND_STR));
3309 break;
3310
3311 case SCTP_NOTIFY_ASCONF_ADD_IP:
3312 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3313 error);
3314 break;
3315 case SCTP_NOTIFY_ASCONF_DELETE_IP:
3316 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3317 error);
3318 break;
3319 case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3320 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3321 error);
3322 break;
3323 case SCTP_NOTIFY_ASCONF_SUCCESS:
3324 break;
3325 case SCTP_NOTIFY_ASCONF_FAILED:
3326 break;
3327 case SCTP_NOTIFY_PEER_SHUTDOWN:
3328 sctp_notify_shutdown_event(stcb);
3329 break;
3330 case SCTP_NOTIFY_AUTH_NEW_KEY:
3331 sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3332 (uint16_t) (uintptr_t) data);
3333 break;
3334#if 0
3335 case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3336 sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3337 error, (uint16_t) (uintptr_t) data);
3338 break;
3339#endif /* not yet? remove? */
3340
3341
3342 default:
3343#ifdef SCTP_DEBUG
3344 if (sctp_debug_on & SCTP_DEBUG_UTIL1) {
3345 printf("NOTIFY: unknown notification %xh (%u)\n",
3346 notification, notification);
3347 }
3348#endif /* SCTP_DEBUG */
3349 break;
3350 } /* end switch */
3351}
3352
3353void
3354sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock)
3355{
3356 struct sctp_association *asoc;
3357 struct sctp_stream_out *outs;
3358 struct sctp_tmit_chunk *chk;
3359 struct sctp_stream_queue_pending *sp;
3360 int i;
3361
3362 asoc = &stcb->asoc;
3363
3364 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3365 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3366 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3367 return;
3368 }
3369 /* now through all the gunk freeing chunks */
3370 if (holds_lock == 0)
3371 SCTP_TCB_SEND_LOCK(stcb);
3372 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3373 /* For each stream */
3374 outs = &stcb->asoc.strmout[i];
3375 /* clean up any sends there */
3376 stcb->asoc.locked_on_sending = NULL;
3377 sp = TAILQ_FIRST(&outs->outqueue);
3378 while (sp) {
3379 stcb->asoc.stream_queue_cnt--;
3380 TAILQ_REMOVE(&outs->outqueue, sp, next);
3381 sctp_free_spbufspace(stcb, asoc, sp);
3382 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3383 SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp);
3384 if (sp->data) {
3385 sctp_m_freem(sp->data);
3386 sp->data = NULL;
3387 }
3388 if (sp->net)
3389 sctp_free_remote_addr(sp->net);
3390 sp->net = NULL;
3391 /* Free the chunk */
3392 sctp_free_a_strmoq(stcb, sp);
3393 sp = TAILQ_FIRST(&outs->outqueue);
3394 }
3395 }
3396
3397 /* pending send queue SHOULD be empty */
3398 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3399 chk = TAILQ_FIRST(&asoc->send_queue);
3400 while (chk) {
3401 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3402 asoc->send_queue_cnt--;
3403 if (chk->data) {
3404 /*
3405 * trim off the sctp chunk header(it should
3406 * be there)
3407 */
3408 if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3409 m_adj(chk->data, sizeof(struct sctp_data_chunk));
3410 sctp_mbuf_crush(chk->data);
3411 }
3412 }
3413 sctp_free_bufspace(stcb, asoc, chk, 1);
3414 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk);
3415 if (chk->data) {
3416 sctp_m_freem(chk->data);
3417 chk->data = NULL;
3418 }
3419 if (chk->whoTo)
3420 sctp_free_remote_addr(chk->whoTo);
3421 chk->whoTo = NULL;
3422 sctp_free_a_chunk(stcb, chk);
3423 chk = TAILQ_FIRST(&asoc->send_queue);
3424 }
3425 }
3426 /* sent queue SHOULD be empty */
3427 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3428 chk = TAILQ_FIRST(&asoc->sent_queue);
3429 while (chk) {
3430 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3431 asoc->sent_queue_cnt--;
3432 if (chk->data) {
3433 /*
3434 * trim off the sctp chunk header(it should
3435 * be there)
3436 */
3437 if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3438 m_adj(chk->data, sizeof(struct sctp_data_chunk));
3439 sctp_mbuf_crush(chk->data);
3440 }
3441 }
3442 sctp_free_bufspace(stcb, asoc, chk, 1);
3443 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3444 SCTP_NOTIFY_DATAGRAM_SENT, chk);
3445 if (chk->data) {
3446 sctp_m_freem(chk->data);
3447 chk->data = NULL;
3448 }
3449 if (chk->whoTo)
3450 sctp_free_remote_addr(chk->whoTo);
3451 chk->whoTo = NULL;
3452 sctp_free_a_chunk(stcb, chk);
3453 chk = TAILQ_FIRST(&asoc->sent_queue);
3454 }
3455 }
3456 if (holds_lock == 0)
3457 SCTP_TCB_SEND_UNLOCK(stcb);
3458}
3459
3460void
3461sctp_abort_notification(struct sctp_tcb *stcb, int error)
3462{
3463
3464 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3465 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3466 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3467 return;
3468 }
3469 /* Tell them we lost the asoc */
3470 sctp_report_all_outbound(stcb, 1);
3471 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3472 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3473 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3474 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3475 }
3476 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL);
3477}
3478
3479void
3480sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3481 struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err)
3482{
3483 uint32_t vtag;
3484
3485 vtag = 0;
3486 if (stcb != NULL) {
3487 /* We have a TCB to abort, send notification too */
3488 vtag = stcb->asoc.peer_vtag;
3489 sctp_abort_notification(stcb, 0);
3490 }
3491 sctp_send_abort(m, iphlen, sh, vtag, op_err);
3492 if (stcb != NULL) {
3493 /* Ok, now lets free it */
3494 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3495 } else {
3496 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3497 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3498 sctp_inpcb_free(inp, 1, 0);
3499 }
3500 }
3501 }
3502}
3503
3504void
3505sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3506 int error, struct mbuf *op_err)
3507{
3508 uint32_t vtag;
3509
3510 if (stcb == NULL) {
3511 /* Got to have a TCB */
3512 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3513 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3514 sctp_inpcb_free(inp, 1, 0);
3515 }
3516 }
3517 return;
3518 }
3519 vtag = stcb->asoc.peer_vtag;
3520 /* notify the ulp */
3521 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3522 sctp_abort_notification(stcb, error);
3523 /* notify the peer */
3524 sctp_send_abort_tcb(stcb, op_err);
3525 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3526 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3527 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3528 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3529 }
3530 /* now free the asoc */
3531 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3532}
3533
3534void
3535sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3536 struct sctp_inpcb *inp, struct mbuf *op_err)
3537{
3538 struct sctp_chunkhdr *ch, chunk_buf;
3539 unsigned int chk_length;
3540
3541 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3542 /* Generate a TO address for future reference */
3543 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3544 if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3545 sctp_inpcb_free(inp, 1, 0);
3546 }
3547 }
3548 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3549 sizeof(*ch), (uint8_t *) & chunk_buf);
3550 while (ch != NULL) {
3551 chk_length = ntohs(ch->chunk_length);
3552 if (chk_length < sizeof(*ch)) {
3553 /* break to abort land */
3554 break;
3555 }
3556 switch (ch->chunk_type) {
3557 case SCTP_PACKET_DROPPED:
3558 /* we don't respond to pkt-dropped */
3559 return;
3560 case SCTP_ABORT_ASSOCIATION:
3561 /* we don't respond with an ABORT to an ABORT */
3562 return;
3563 case SCTP_SHUTDOWN_COMPLETE:
3564 /*
3565 * we ignore it since we are not waiting for it and
3566 * peer is gone
3567 */
3568 return;
3569 case SCTP_SHUTDOWN_ACK:
3570 sctp_send_shutdown_complete2(m, iphlen, sh);
3571 return;
3572 default:
3573 break;
3574 }
3575 offset += SCTP_SIZE32(chk_length);
3576 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3577 sizeof(*ch), (uint8_t *) & chunk_buf);
3578 }
3579 sctp_send_abort(m, iphlen, sh, 0, op_err);
3580}
3581
3582/*
3583 * check the inbound datagram to make sure there is not an abort inside it,
3584 * if there is return 1, else return 0.
3585 */
3586int
3587sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3588{
3589 struct sctp_chunkhdr *ch;
3590 struct sctp_init_chunk *init_chk, chunk_buf;
3591 int offset;
3592 unsigned int chk_length;
3593
3594 offset = iphlen + sizeof(struct sctphdr);
3595 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3596 (uint8_t *) & chunk_buf);
3597 while (ch != NULL) {
3598 chk_length = ntohs(ch->chunk_length);
3599 if (chk_length < sizeof(*ch)) {
3600 /* packet is probably corrupt */
3601 break;
3602 }
3603 /* we seem to be ok, is it an abort? */
3604 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3605 /* yep, tell them */
3606 return (1);
3607 }
3608 if (ch->chunk_type == SCTP_INITIATION) {
3609 /* need to update the Vtag */
3610 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3611 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3612 if (init_chk != NULL) {
3613 *vtagfill = ntohl(init_chk->init.initiate_tag);
3614 }
3615 }
3616 /* Nope, move to the next chunk */
3617 offset += SCTP_SIZE32(chk_length);
3618 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3619 sizeof(*ch), (uint8_t *) & chunk_buf);
3620 }
3621 return (0);
3622}
3623
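Both sctp_handle_ootb() and sctp_is_there_an_abort_here() walk the packet's chunk headers the same way: read a header, advance by the chunk length rounded up to a 4-byte boundary, and stop on a short or corrupt length. A minimal userland sketch of that walk (illustration only, not the kernel code; the struct mirrors sctp_chunkhdr, PAD32 stands in for SCTP_SIZE32(), and the ABORT type value 6 is the RFC 4960 chunk type):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct chunk_hdr {                  /* same layout as struct sctp_chunkhdr */
	uint8_t  chunk_type;
	uint8_t  chunk_flags;
	uint16_t chunk_length;      /* network order, includes this header */
};

#define PAD32(x) (((x) + 3) & ~3)   /* analogue of SCTP_SIZE32() */

/* Return 1 if an ABORT chunk (type 6) is present in the chunk area. */
static int
has_abort_chunk(const uint8_t *chunks, size_t len)
{
	size_t off = 0;
	struct chunk_hdr ch;

	while (off + sizeof(ch) <= len) {
		memcpy(&ch, chunks + off, sizeof(ch));
		uint16_t clen = ntohs(ch.chunk_length);
		if (clen < sizeof(ch))
			break;              /* corrupt packet, stop walking */
		if (ch.chunk_type == 6)     /* SCTP_ABORT_ASSOCIATION */
			return (1);
		off += PAD32(clen);
	}
	return (0);
}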
3624/*
3625 * currently (2/02), ifa_addr embeds scope_id's and doesn't have sin6_scope_id
3626 * set (i.e. it's 0), so create this function to compare link-local scopes
3627 */
3628uint32_t
3629sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3630{
3631 struct sockaddr_in6 a, b;
3632
3633 /* save copies */
3634 a = *addr1;
3635 b = *addr2;
3636
3637 if (a.sin6_scope_id == 0)
3638 if (sa6_recoverscope(&a)) {
3639 /* can't get scope, so can't match */
3640 return (0);
3641 }
3642 if (b.sin6_scope_id == 0)
3643 if (sa6_recoverscope(&b)) {
3644 /* can't get scope, so can't match */
3645 return (0);
3646 }
3647 if (a.sin6_scope_id != b.sin6_scope_id)
3648 return (0);
3649
3650 return (1);
3651}
3652
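A userland caller that already has sin6_scope_id filled in (for example from getaddrinfo()) does not need the sa6_recoverscope() recovery done above; under that assumption the comparison reduces to the sketch below (illustration only, hypothetical helper name):

#include <netinet/in.h>

static int
same_link_local_scope(const struct sockaddr_in6 *a,
    const struct sockaddr_in6 *b)
{
	/* scope ids only disambiguate link-local addresses */
	if (!IN6_IS_ADDR_LINKLOCAL(&a->sin6_addr) ||
	    !IN6_IS_ADDR_LINKLOCAL(&b->sin6_addr))
		return (1);
	return (a->sin6_scope_id == b->sin6_scope_id);
}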
3653/*
3654 * returns a sockaddr_in6 with embedded scope recovered and removed
3655 */
3656struct sockaddr_in6 *
3657sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
3658{
3659 /* check and strip embedded scope junk */
3660 if (addr->sin6_family == AF_INET6) {
3661 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
3662 if (addr->sin6_scope_id == 0) {
3663 *store = *addr;
3664 if (!sa6_recoverscope(store)) {
3665 /* use the recovered scope */
3666 addr = store;
3667 }
3668 } else {
3669 /* else, return the original "to" addr */
3670 in6_clearscope(&addr->sin6_addr);
3671 }
3672 }
3673 }
3674 return (addr);
3675}
3676
3677/*
3678 * are the two addresses the same? currently a "scopeless" check; returns 1
3679 * if same, 0 if not
3680 */
3681__inline int
3682sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3683{
3684
3685 /* must be valid */
3686 if (sa1 == NULL || sa2 == NULL)
3687 return (0);
3688
3689 /* must be the same family */
3690 if (sa1->sa_family != sa2->sa_family)
3691 return (0);
3692
3693 if (sa1->sa_family == AF_INET6) {
3694 /* IPv6 addresses */
3695 struct sockaddr_in6 *sin6_1, *sin6_2;
3696
3697 sin6_1 = (struct sockaddr_in6 *)sa1;
3698 sin6_2 = (struct sockaddr_in6 *)sa2;
3699 return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
3700 &sin6_2->sin6_addr));
3701 } else if (sa1->sa_family == AF_INET) {
3702 /* IPv4 addresses */
3703 struct sockaddr_in *sin_1, *sin_2;
3704
3705 sin_1 = (struct sockaddr_in *)sa1;
3706 sin_2 = (struct sockaddr_in *)sa2;
3707 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3708 } else {
3709 /* we don't do these... */
3710 return (0);
3711 }
3712}
3713
3714void
3715sctp_print_address(struct sockaddr *sa)
3716{
3717
3718 if (sa->sa_family == AF_INET6) {
3719 struct sockaddr_in6 *sin6;
3720 char ip6buf[INET6_ADDRSTRLEN];
3721
3722 sin6 = (struct sockaddr_in6 *)sa;
3723 printf("IPv6 address: %s:%d scope:%u\n",
3724 ip6_sprintf(ip6buf, &sin6->sin6_addr),
3725 ntohs(sin6->sin6_port),
3726 sin6->sin6_scope_id);
3727 } else if (sa->sa_family == AF_INET) {
3728 struct sockaddr_in *sin;
3729 unsigned char *p;
3730
3731 sin = (struct sockaddr_in *)sa;
3732 p = (unsigned char *)&sin->sin_addr;
3733 printf("IPv4 address: %u.%u.%u.%u:%d\n",
3734 p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
3735 } else {
3736 printf("?\n");
3737 }
3738}
3739
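A userland analogue of sctp_print_address() can use inet_ntop(3) in place of the kernel's ip6_sprintf() and manual byte printing; a minimal sketch (hypothetical helper name, illustration only):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static void
print_sockaddr(const struct sockaddr *sa)
{
	char buf[INET6_ADDRSTRLEN];

	if (sa->sa_family == AF_INET6) {
		const struct sockaddr_in6 *s6 = (const struct sockaddr_in6 *)sa;

		inet_ntop(AF_INET6, &s6->sin6_addr, buf, sizeof(buf));
		printf("IPv6 address: %s:%d scope:%u\n", buf,
		    ntohs(s6->sin6_port), s6->sin6_scope_id);
	} else if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *s4 = (const struct sockaddr_in *)sa;

		inet_ntop(AF_INET, &s4->sin_addr, buf, sizeof(buf));
		printf("IPv4 address: %s:%d\n", buf, ntohs(s4->sin_port));
	} else {
		printf("?\n");
	}
}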
3740void
3741sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
3742{
3743 if (iph->ip_v == IPVERSION) {
3744 struct sockaddr_in lsa, fsa;
3745
3746 bzero(&lsa, sizeof(lsa));
3747 lsa.sin_len = sizeof(lsa);
3748 lsa.sin_family = AF_INET;
3749 lsa.sin_addr = iph->ip_src;
3750 lsa.sin_port = sh->src_port;
3751 bzero(&fsa, sizeof(fsa));
3752 fsa.sin_len = sizeof(fsa);
3753 fsa.sin_family = AF_INET;
3754 fsa.sin_addr = iph->ip_dst;
3755 fsa.sin_port = sh->dest_port;
3756 printf("src: ");
3757 sctp_print_address((struct sockaddr *)&lsa);
3758 printf("dest: ");
3759 sctp_print_address((struct sockaddr *)&fsa);
3760 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3761 struct ip6_hdr *ip6;
3762 struct sockaddr_in6 lsa6, fsa6;
3763
3764 ip6 = (struct ip6_hdr *)iph;
3765 bzero(&lsa6, sizeof(lsa6));
3766 lsa6.sin6_len = sizeof(lsa6);
3767 lsa6.sin6_family = AF_INET6;
3768 lsa6.sin6_addr = ip6->ip6_src;
3769 lsa6.sin6_port = sh->src_port;
3770 bzero(&fsa6, sizeof(fsa6));
3771 fsa6.sin6_len = sizeof(fsa6);
3772 fsa6.sin6_family = AF_INET6;
3773 fsa6.sin6_addr = ip6->ip6_dst;
3774 fsa6.sin6_port = sh->dest_port;
3775 printf("src: ");
3776 sctp_print_address((struct sockaddr *)&lsa6);
3777 printf("dest: ");
3778 sctp_print_address((struct sockaddr *)&fsa6);
3779 }
3780}
3781
3782void
3783sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
3784 struct sctp_inpcb *new_inp,
3785 struct sctp_tcb *stcb)
3786{
3787 /*
3788 * go through our old INP and pull off any control structures that
3789 * belong to stcb and move them to the new inp.
3790 */
3791 struct socket *old_so, *new_so;
3792 struct sctp_queued_to_read *control, *nctl;
3793 struct sctp_readhead tmp_queue;
3794 struct mbuf *m;
3795 int error;
3796
3797 old_so = old_inp->sctp_socket;
3798 new_so = new_inp->sctp_socket;
3799 TAILQ_INIT(&tmp_queue);
3800
3801 SOCKBUF_LOCK(&(old_so->so_rcv));
3802
3803 error = sblock(&old_so->so_rcv, 0);
3804
3805 SOCKBUF_UNLOCK(&(old_so->so_rcv));
3806 if (error) {
3807 /*
3808 * Gak, can't get sblock, we have a problem. data will be
3809 * left stranded.. and we don't dare look at it since the
3810 * other thread may be reading something. Oh well, it's a
3811 * screwed-up app that does a peeloff OR an accept while
3812 * reading from the main socket... actually it's only the
3813 * peeloff() case, since I think read will fail on a
3814 * listening socket..
3815 */
3816 return;
3817 }
3818 /* lock the socket buffers */
3819 SCTP_INP_READ_LOCK(old_inp);
3820 control = TAILQ_FIRST(&old_inp->read_queue);
3821 /* Pull off all for our target stcb */
3822 while (control) {
3823 nctl = TAILQ_NEXT(control, next);
3824 if (control->stcb == stcb) {
3825 /* remove it we want it */
3826 TAILQ_REMOVE(&old_inp->read_queue, control, next);
3827 TAILQ_INSERT_TAIL(&tmp_queue, control, next);
3828 m = control->data;
3829 while (m) {
3830#ifdef SCTP_SB_LOGGING
3831 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
3832#endif
3833 sctp_sbfree(control, stcb, &old_so->so_rcv, m);
3834#ifdef SCTP_SB_LOGGING
3835 sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3836#endif
3837 m = SCTP_BUF_NEXT(m);
3838 }
3839 }
3840 control = nctl;
3841 }
3842 SCTP_INP_READ_UNLOCK(old_inp);
3843
3844 /* Remove the sb-lock on the old socket */
3845 SOCKBUF_LOCK(&(old_so->so_rcv));
3846
3847 sbunlock(&old_so->so_rcv);
3848 SOCKBUF_UNLOCK(&(old_so->so_rcv));
3849
3850 /* Now we move them over to the new socket buffer */
3851 control = TAILQ_FIRST(&tmp_queue);
3852 SCTP_INP_READ_LOCK(new_inp);
3853 while (control) {
3854 nctl = TAILQ_NEXT(control, next);
3855 TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
3856 m = control->data;
3857 while (m) {
3858#ifdef SCTP_SB_LOGGING
3859 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
3860#endif
3861 sctp_sballoc(stcb, &new_so->so_rcv, m);
3862#ifdef SCTP_SB_LOGGING
3863 sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3864#endif
3865 m = SCTP_BUF_NEXT(m);
3866 }
3867 control = nctl;
3868 }
3869 SCTP_INP_READ_UNLOCK(new_inp);
3870}
3871
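The move above uses a two-pass pattern: matching entries are pulled onto a private temporary queue under the old socket's locks, then spliced into the new inp's read queue under the new locks. Stripped of the locking and socket-buffer accounting, it is plain sys/queue.h usage; a self-contained sketch with illustrative types:

#include <sys/queue.h>

struct entry {
	int key;
	TAILQ_ENTRY(entry) next;
};
TAILQ_HEAD(entry_head, entry);

/* Move every entry whose key matches from src to dst via a temp queue. */
static void
move_matching(struct entry_head *src, struct entry_head *dst, int key)
{
	struct entry_head tmp;
	struct entry *e, *n;

	TAILQ_INIT(&tmp);
	for (e = TAILQ_FIRST(src); e != NULL; e = n) {
		n = TAILQ_NEXT(e, next);        /* remember next before removal */
		if (e->key == key) {
			TAILQ_REMOVE(src, e, next);
			TAILQ_INSERT_TAIL(&tmp, e, next);
		}
	}
	while ((e = TAILQ_FIRST(&tmp)) != NULL) {
		TAILQ_REMOVE(&tmp, e, next);
		TAILQ_INSERT_TAIL(dst, e, next);
	}
}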
3872
3873void
3874sctp_add_to_readq(struct sctp_inpcb *inp,
3875 struct sctp_tcb *stcb,
3876 struct sctp_queued_to_read *control,
3877 struct sockbuf *sb,
3878 int end)
3879{
3880 /*
3881 * Here we must place the control on the end of the socket read
3882 * queue AND increment sb_cc so that select will work properly on
3883 * read.
3884 */
3885 struct mbuf *m, *prev = NULL;
3886
3887 if (inp == NULL) {
3888 /* Gak, TSNH!! */
3889#ifdef INVARIANTS
3890 panic("Gak, inp NULL on add_to_readq");
3891#endif
3892 return;
3893 }
3894 SCTP_INP_READ_LOCK(inp);
3722 atomic_add_int(&inp->total_recvs, 1);
3723 atomic_add_int(&stcb->total_recvs, 1);
3895 if (!(control->spec_flags & M_NOTIFICATION)) {
3896 atomic_add_int(&inp->total_recvs, 1);
3897 if (!control->do_not_ref_stcb) {
3898 atomic_add_int(&stcb->total_recvs, 1);
3899 }
3900 }
3724 m = control->data;
3725 control->held_length = 0;
3726 control->length = 0;
3727 while (m) {
3728 if (SCTP_BUF_LEN(m) == 0) {
3729 /* Skip mbufs with NO length */
3730 if (prev == NULL) {
3731 /* First one */
3732 control->data = sctp_m_free(m);
3733 m = control->data;
3734 } else {
3735 SCTP_BUF_NEXT(prev) = sctp_m_free(m);
3736 m = SCTP_BUF_NEXT(prev);
3737 }
3738 if (m == NULL) {
3739 control->tail_mbuf = prev;;
3740 }
3741 continue;
3742 }
3743 prev = m;
3744#ifdef SCTP_SB_LOGGING
3745 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
3746#endif
3747 sctp_sballoc(stcb, sb, m);
3748#ifdef SCTP_SB_LOGGING
3749 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3750#endif
3751 atomic_add_int(&control->length, SCTP_BUF_LEN(m));
3752 m = SCTP_BUF_NEXT(m);
3753 }
3754 if (prev != NULL) {
3755 control->tail_mbuf = prev;
3756 } else {
3757 /* Everything got collapsed out?? */
3758 return;
3759 }
3760 if (end) {
3761 control->end_added = 1;
3762 }
3763 TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
3764 SCTP_INP_READ_UNLOCK(inp);
3765 if (inp && inp->sctp_socket) {
3766 sctp_sorwakeup(inp, inp->sctp_socket);
3767 }
3768}
3769
3770
3771int
3772sctp_append_to_readq(struct sctp_inpcb *inp,
3773 struct sctp_tcb *stcb,
3774 struct sctp_queued_to_read *control,
3775 struct mbuf *m,
3776 int end,
3777 int ctls_cumack,
3778 struct sockbuf *sb)
3779{
3780 /*
3781 * A partial delivery API event is underway. OR we are appending on
3782 * the reassembly queue.
3783 *
3784 * If PDAPI this means we need to add m to the end of the data.
3785 * Increase the length in the control AND increment the sb_cc.
3786 * Otherwise sb is NULL and all we need to do is put it at the end
3787 * of the mbuf chain.
3788 */
3789 int len = 0;
3790 struct mbuf *mm, *tail = NULL, *prev = NULL;
3791
3792 if (inp) {
3793 SCTP_INP_READ_LOCK(inp);
3794 }
3795 if (control == NULL) {
3796get_out:
3797 if (inp) {
3798 SCTP_INP_READ_UNLOCK(inp);
3799 }
3800 return (-1);
3801 }
3802 if (control->end_added) {
3803 /* huh this one is complete? */
3804 goto get_out;
3805 }
3806 mm = m;
3807 if (mm == NULL) {
3808 goto get_out;
3809 }
3810 while (mm) {
3811 if (SCTP_BUF_LEN(mm) == 0) {
3812 /* Skip mbufs with NO length */
3813 if (prev == NULL) {
3814 /* First one */
3815 m = sctp_m_free(mm);
3816 mm = m;
3817 } else {
3818 SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
3819 mm = SCTP_BUF_NEXT(prev);
3820 }
3821 continue;
3822 }
3823 prev = mm;
3824 len += SCTP_BUF_LEN(mm);
3825 if (sb) {
3826#ifdef SCTP_SB_LOGGING
3827 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
3828#endif
3829 sctp_sballoc(stcb, sb, mm);
3830#ifdef SCTP_SB_LOGGING
3831 sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3832#endif
3833 }
3834 mm = SCTP_BUF_NEXT(mm);
3835 }
3836 if (prev) {
3837 tail = prev;
3838 } else {
3839 /* Really there should always be a prev */
3840 if (m == NULL) {
3841 /* Huh nothing left? */
3842#ifdef INVARIANTS
3843 panic("Nothing left to add?");
3844#else
3845 goto get_out;
3846#endif
3847 }
3848 tail = m;
3849 }
3850 if (end) {
3851 /* message is complete */
3852 if (control == stcb->asoc.control_pdapi) {
3853 stcb->asoc.control_pdapi = NULL;
3854 }
3855 control->held_length = 0;
3856 control->end_added = 1;
3857 }
3858 atomic_add_int(&control->length, len);
3859 if (control->tail_mbuf) {
3860 /* append */
3861 SCTP_BUF_NEXT(control->tail_mbuf) = m;
3862 control->tail_mbuf = tail;
3863 } else {
3864 /* nothing there */
3865#ifdef INVARIANTS
3866 if (control->data != NULL) {
3867 panic("This should NOT happen");
3868 }
3869#endif
3870 control->data = m;
3871 control->tail_mbuf = tail;
3872 }
3873 /*
3874 * When we are appending in partial delivery, the cum-ack is used
3875 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
3876 * is populated in the outbound sinfo structure from the true cumack
3877 * if the association exists...
3878 */
3879 control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
3880 if (inp) {
3881 SCTP_INP_READ_UNLOCK(inp);
3882 }
3883 if (inp && inp->sctp_socket) {
3884 sctp_sorwakeup(inp, inp->sctp_socket);
3885 }
3886 return (0);
3887}
3888
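The reason control->tail_mbuf is carried around becomes visible here: with a tail pointer the partial-delivery append is O(1) instead of a walk of the whole mbuf chain. A generic sketch of that tail-pointer append (illustrative node type, not mbufs):

struct node {
	struct node *next;
	int len;
};

/* Append the chain add..add_tail to the list described by *head/*tail. */
static void
chain_append(struct node **head, struct node **tail,
    struct node *add, struct node *add_tail)
{
	if (*tail != NULL)
		(*tail)->next = add;    /* O(1): no walk to find the end */
	else
		*head = add;            /* list was empty */
	*tail = add_tail;
}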
3889
3890
3891/*************HOLD THIS COMMENT FOR PATCH FILE OF
3892 *************ALTERNATE ROUTING CODE
3893 */
3894
3895/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
3896 *************ALTERNATE ROUTING CODE
3897 */
3898
3899struct mbuf *
3900sctp_generate_invmanparam(int err)
3901{
3902 /* Return an MBUF with an invalid mandatory parameter */
3903 struct mbuf *m;
3904
3905 m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
3906 if (m) {
3907 struct sctp_paramhdr *ph;
3908
3909 SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
3910 ph = mtod(m, struct sctp_paramhdr *);
3911 ph->param_length = htons(sizeof(struct sctp_paramhdr));
3912 ph->param_type = htons(err);
3913 }
3914 return (m);
3915}
3916
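sctp_generate_invmanparam() just builds a bare parameter/cause TLV header; both 16-bit fields go out in network byte order, as the htons() calls show. A userland sketch of the same encoding into a flat buffer (hypothetical helper name, illustration only):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct param_hdr {              /* same layout as struct sctp_paramhdr */
	uint16_t param_type;
	uint16_t param_length;
};

static size_t
build_param_hdr(uint8_t *buf, size_t buflen, uint16_t type)
{
	struct param_hdr ph;

	if (buflen < sizeof(ph))
		return (0);
	ph.param_type = htons(type);
	ph.param_length = htons((uint16_t)sizeof(ph));  /* length includes header */
	memcpy(buf, &ph, sizeof(ph));
	return (sizeof(ph));
}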
3917#ifdef SCTP_MBCNT_LOGGING
3918void
3919sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
3920 struct sctp_tmit_chunk *tp1, int chk_cnt)
3921{
3922 if (tp1->data == NULL) {
3923 return;
3924 }
3925 asoc->chunks_on_out_queue -= chk_cnt;
3926 sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
3927 asoc->total_output_queue_size,
3928 tp1->book_size,
3929 0,
3930 tp1->mbcnt);
3931 if (asoc->total_output_queue_size >= tp1->book_size) {
3932 atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
3933 } else {
3934 asoc->total_output_queue_size = 0;
3935 }
3936
3937 if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
3938 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
3939 if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
3940 stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
3941 } else {
3942 stcb->sctp_socket->so_snd.sb_cc = 0;
3943
3944 }
3945 }
3946}
3947
3948#endif
3949
3950int
3951sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
3952 int reason, struct sctpchunk_listhead *queue)
3953{
3954 int ret_sz = 0;
3955 int notdone;
3956 uint8_t foundeom = 0;
3957
3958 do {
3959 ret_sz += tp1->book_size;
3960 tp1->sent = SCTP_FORWARD_TSN_SKIP;
3961 if (tp1->data) {
3962 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3963 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1);
3964 sctp_m_freem(tp1->data);
3965 tp1->data = NULL;
3966 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3967 }
3968 if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
3969 stcb->asoc.sent_queue_cnt_removeable--;
3970 }
3971 if (queue == &stcb->asoc.send_queue) {
3972 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
3973 /* on to the sent queue */
3974 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
3975 sctp_next);
3976 stcb->asoc.sent_queue_cnt++;
3977 }
3978 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
3979 SCTP_DATA_NOT_FRAG) {
3980 /* not frag'ed, we are done */
3981 notdone = 0;
3982 foundeom = 1;
3983 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
3984 /* end of frag, we are done */
3985 notdone = 0;
3986 foundeom = 1;
3987 } else {
3988 /*
3989 * It's a begin or middle piece, we must mark all of
3990 * it
3991 */
3992 notdone = 1;
3993 tp1 = TAILQ_NEXT(tp1, sctp_next);
3994 }
3995 } while (tp1 && notdone);
3996 if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
3997 /*
3998 * The multi-part message was scattered across the send and
3999 * sent queue.
4000 */
4001 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4002 /*
4003 * recurse through the send_queue too, starting at the
4004 * beginning.
4005 */
4006 if (tp1) {
4007 ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
4008 &stcb->asoc.send_queue);
4009 } else {
4010 printf("hmm, nothing on the send queue and no EOM?\n");
4011 }
4012 }
4013 return (ret_sz);
4014}
4015
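The loop above keeps marking chunks until it finds the end of the message: an unfragmented DATA chunk carries both the beginning (B) and end (E) bits, and a fragmented message ends at the chunk carrying the E bit. A small sketch of that test; the bit values are RFC 4960's DATA chunk flags (E = 0x01, B = 0x02), which is what the SCTP_DATA_*_FRAG macros encode:

#include <stdint.h>

#define DATA_FLAG_E 0x01    /* last fragment / end of message */
#define DATA_FLAG_B 0x02    /* first fragment / beginning of message */

/* Return 1 when this chunk completes a message (B+E together, or E alone). */
static int
message_ends_here(uint8_t rcv_flags)
{
	if ((rcv_flags & (DATA_FLAG_B | DATA_FLAG_E)) ==
	    (DATA_FLAG_B | DATA_FLAG_E))
		return (1);             /* not fragmented at all */
	return ((rcv_flags & DATA_FLAG_E) != 0);
}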
4016/*
4017 * checks to see if the given address, sa, is one that is currently known by
4018 * the kernel. Note: can't distinguish the same address on multiple interfaces
4019 * and doesn't handle multiple addresses with different zone/scope ids. Note:
4020 * ifa_ifwithaddr() compares the entire sockaddr struct
4021 */
4022struct ifaddr *
4023sctp_find_ifa_by_addr(struct sockaddr *sa)
4199struct sctp_ifa *
4200sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, int holds_lock)
4024{
4201{
4025 struct ifnet *ifn;
4026 struct ifaddr *ifa;
4202 struct sctp_laddr *laddr;
4027
4203
4028 /* go through all our known interfaces */
4029 TAILQ_FOREACH(ifn, &ifnet, if_list) {
4030 /* go through each interface addresses */
4031 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
4032 /* correct family? */
4033 if (ifa->ifa_addr->sa_family != sa->sa_family)
4034 continue;
4204 if (holds_lock == 0)
4205 SCTP_INP_RLOCK(inp);
4035
4206
4036#ifdef INET6
4037 if (ifa->ifa_addr->sa_family == AF_INET6) {
4038 /* IPv6 address */
4039 struct sockaddr_in6 *sin1, *sin2, sin6_tmp;
4207 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4208 if (laddr->ifa == NULL)
4209 continue;
4210 if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4211 continue;
4212 if (addr->sa_family == AF_INET) {
4213 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4214 laddr->ifa->address.sin.sin_addr.s_addr) {
4215 /* found him. */
4216 if (holds_lock == 0)
4217 SCTP_INP_RUNLOCK(inp);
4218 return (laddr->ifa);
4219 break;
4220 }
4221 } else if (addr->sa_family == AF_INET6) {
4222 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4223 &laddr->ifa->address.sin6.sin6_addr)) {
4224 /* found him. */
4225 if (holds_lock == 0)
4226 SCTP_INP_RUNLOCK(inp);
4227 return (laddr->ifa);
4228 break;
4229 }
4230 }
4231 }
4232 if (holds_lock == 0)
4233 SCTP_INP_RUNLOCK(inp);
4234 return (NULL);
4235}
4040
4236
4041 sin1 = (struct sockaddr_in6 *)ifa->ifa_addr;
4042 if (IN6_IS_SCOPE_LINKLOCAL(&sin1->sin6_addr)) {
4043 /* create a copy and clear scope */
4044 memcpy(&sin6_tmp, sin1,
4045 sizeof(struct sockaddr_in6));
4046 sin1 = &sin6_tmp;
4047 in6_clearscope(&sin1->sin6_addr);
4048 }
4049 sin2 = (struct sockaddr_in6 *)sa;
4050 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4051 sizeof(struct in6_addr)) == 0) {
4052 /* found it */
4053 return (ifa);
4054 }
4055 } else
4056#endif
4057 if (ifa->ifa_addr->sa_family == AF_INET) {
4058 /* IPv4 address */
4059 struct sockaddr_in *sin1, *sin2;
4237struct sctp_ifa *
4238sctp_find_ifa_in_ifn(struct sctp_ifn *sctp_ifnp, struct sockaddr *addr,
4239 int holds_lock)
4240{
4241 struct sctp_ifa *sctp_ifap;
4060
4242
4061 sin1 = (struct sockaddr_in *)ifa->ifa_addr;
4062 sin2 = (struct sockaddr_in *)sa;
4063 if (sin1->sin_addr.s_addr ==
4064 sin2->sin_addr.s_addr) {
4065 /* found it */
4066 return (ifa);
4067 }
4243 if (holds_lock == 0)
4244 SCTP_IPI_ADDR_LOCK();
4245
4246 LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
4247 if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4248 continue;
4249 if (addr->sa_family == AF_INET) {
4250 if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4251 sctp_ifap->address.sin.sin_addr.s_addr) {
4252 /* found him. */
4253 if (holds_lock == 0)
4254 SCTP_IPI_ADDR_UNLOCK();
4255 return (sctp_ifap);
4256 break;
4068 }
4257 }
4069 /* else, not AF_INET or AF_INET6, so skip */
4070 } /* end foreach ifa */
4071 } /* end foreach ifn */
4072 /* not found! */
4258 } else if (addr->sa_family == AF_INET6) {
4259 if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4260 &sctp_ifap->address.sin6.sin6_addr)) {
4261 /* found him. */
4262 if (holds_lock == 0)
4263 SCTP_IPI_ADDR_UNLOCK();
4264 return (sctp_ifap);
4265 break;
4266 }
4267 }
4268 }
4269 if (holds_lock == 0)
4270 SCTP_IPI_ADDR_UNLOCK();
4073 return (NULL);
4074}
4075
4271 return (NULL);
4272}
4273
4274struct sctp_ifa *
4275sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4276{
4277 struct sctp_ifa *sctp_ifap;
4278 struct sctp_ifn *sctp_ifnp = NULL;
4279 struct sctp_vrf *vrf;
4280
4281 vrf = sctp_find_vrf(vrf_id);
4282 if (vrf == NULL)
4283 return (NULL);
4284
4285 if (holds_lock == 0)
4286 SCTP_IPI_ADDR_LOCK();
4287
4288 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
4289 sctp_ifap = sctp_find_ifa_in_ifn(sctp_ifnp, addr, 1);
4290 if (sctp_ifap) {
4291 if (holds_lock == 0)
4292 SCTP_IPI_ADDR_UNLOCK();
4293 return (sctp_ifap);
4294 }
4295 }
4296 if (holds_lock == 0)
4297 SCTP_IPI_ADDR_UNLOCK();
4298 return (NULL);
4299}
4300
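The new lookup helpers above all follow the same shape: take the address-list lock unless the caller already holds it, walk a list, match on the address family first, then compare the family-specific address bytes. A self-contained userland sketch of that walk using sys/queue.h (hypothetical structure and function names):

#include <sys/queue.h>
#include <sys/socket.h>
#include <netinet/in.h>

struct addr_ent {
	struct sockaddr_storage addr;
	LIST_ENTRY(addr_ent) link;
};
LIST_HEAD(addr_list, addr_ent);

static struct addr_ent *
find_addr(struct addr_list *list, const struct sockaddr *sa)
{
	struct addr_ent *e;

	LIST_FOREACH(e, list, link) {
		const struct sockaddr *ea = (const struct sockaddr *)&e->addr;

		if (ea->sa_family != sa->sa_family)
			continue;               /* family check first */
		if (sa->sa_family == AF_INET) {
			const struct sockaddr_in *a = (const void *)sa;
			const struct sockaddr_in *b = (const void *)ea;

			if (a->sin_addr.s_addr == b->sin_addr.s_addr)
				return (e);
		} else if (sa->sa_family == AF_INET6) {
			const struct sockaddr_in6 *a = (const void *)sa;
			const struct sockaddr_in6 *b = (const void *)ea;

			if (IN6_ARE_ADDR_EQUAL(&a->sin6_addr, &b->sin6_addr))
				return (e);
		}
	}
	return (NULL);
}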
4076static void
4077sctp_user_rcvd(struct sctp_tcb *stcb, int *freed_so_far, int hold_rlock,
4078 uint32_t rwnd_req)
4079{
4080 /* User pulled some data, do we need a rwnd update? */
4081 int r_unlocked = 0;
4082 uint32_t dif, rwnd;
4083 struct socket *so = NULL;
4084
4085 if (stcb == NULL)
4086 return;
4087
4088 atomic_add_int(&stcb->asoc.refcnt, 1);
4089
4090 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4091 /* Pre-check If we are freeing no update */
4092 goto no_lock;
4093 }
4094 SCTP_INP_INCR_REF(stcb->sctp_ep);
4095 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4096 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4097 goto out;
4098 }
4099 so = stcb->sctp_socket;
4100 if (so == NULL) {
4101 goto out;
4102 }
4103 atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
4104 /* Have you freed enough to look? */
4105#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4106 sctp_misc_ints(SCTP_ENTER_USER_RECV,
4107 (stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd),
4108 *freed_so_far,
4109 stcb->freed_by_sorcv_sincelast,
4110 rwnd_req);
4111#endif
4112 *freed_so_far = 0;
4113 /* Yep, its worth a look and the lock overhead */
4114
4115 /* Figure out what the rwnd would be */
4116 rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
4117 if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
4118 dif = rwnd - stcb->asoc.my_last_reported_rwnd;
4119 } else {
4120 dif = 0;
4121 }
4122 if (dif >= rwnd_req) {
4123 if (hold_rlock) {
4124 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
4125 r_unlocked = 1;
4126 }
4127 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4128 /*
4129 * One last check before we allow the guy possibly
4130 * to get in. There is a race where the guy has not
4131 * reached the gate yet; in that case we skip the update.
4132 */
4133 goto out;
4134 }
4135 SCTP_TCB_LOCK(stcb);
4136 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4137 /* No reports here */
4138 SCTP_TCB_UNLOCK(stcb);
4139 goto out;
4140 }
4141#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4142 sctp_misc_ints(SCTP_USER_RECV_SACKS,
4143 stcb->asoc.my_rwnd,
4144 stcb->asoc.my_last_reported_rwnd,
4145 stcb->freed_by_sorcv_sincelast,
4146 dif);
4147#endif
4148 SCTP_STAT_INCR(sctps_wu_sacks_sent);
4149 sctp_send_sack(stcb);
4150 sctp_chunk_output(stcb->sctp_ep, stcb,
4151 SCTP_OUTPUT_FROM_USR_RCVD);
4152 /* make sure no timer is running */
4153 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
4154 SCTP_TCB_UNLOCK(stcb);
4155 } else {
4156 /* Update how much we have pending */
4157 stcb->freed_by_sorcv_sincelast = dif;
4158#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4159 sctp_misc_ints(SCTP_USER_RECV_SACKS,
4160 stcb->asoc.my_rwnd,
4161 stcb->asoc.my_last_reported_rwnd,
4162 stcb->freed_by_sorcv_sincelast,
4163 0);
4164#endif
4165 }
4166out:
4167 if (so && r_unlocked && hold_rlock) {
4168 SCTP_INP_READ_LOCK(stcb->sctp_ep);
4169 }
4170 SCTP_INP_DECR_REF(stcb->sctp_ep);
4171no_lock:
4172 atomic_add_int(&stcb->asoc.refcnt, -1);
4173 return;
4174}
4175
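The core of sctp_user_rcvd() is a simple hysteresis: a window-update SACK is only worth sending once the receive window has re-opened by at least rwnd_req, which the caller derives from sb_hiwat >> SCTP_RWND_HIWAT_SHIFT and floors at one MTU's worth. That test in isolation, as a sketch:

#include <stdint.h>

/* Decide whether the newly opened window justifies a window-update SACK. */
static int
should_send_window_update(uint32_t rwnd, uint32_t last_reported_rwnd,
    uint32_t rwnd_req)
{
	uint32_t dif;

	dif = (rwnd >= last_reported_rwnd) ? rwnd - last_reported_rwnd : 0;
	return (dif >= rwnd_req);
}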
4176int
4177sctp_sorecvmsg(struct socket *so,
4178 struct uio *uio,
4179 struct mbuf **mp,
4180 struct sockaddr *from,
4181 int fromlen,
4182 int *msg_flags,
4183 struct sctp_sndrcvinfo *sinfo,
4184 int filling_sinfo)
4185{
4186 /*
4187 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO;
4188 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy,
4189 * i.e. mp=NULL, thus uio is the copy method to userland); MSG_WAITALL - ??
4190 * On the way out we may send out any combination of:
4191 * MSG_NOTIFICATION and MSG_EOR
4192 *
4193 */
4194 struct sctp_inpcb *inp = NULL;
4195 int my_len = 0;
4196 int cp_len = 0, error = 0;
4197 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
4198 struct mbuf *m = NULL, *embuf = NULL;
4199 struct sctp_tcb *stcb = NULL;
4200 int wakeup_read_socket = 0;
4201 int freecnt_applied = 0;
4202 int out_flags = 0, in_flags = 0;
4203 int block_allowed = 1;
4204 int freed_so_far = 0;
4205 int copied_so_far = 0;
4206 int in_eeor_mode = 0;
4207 int no_rcv_needed = 0;
4208 uint32_t rwnd_req = 0;
4209 int hold_sblock = 0;
4210 int hold_rlock = 0;
4211 int alen = 0, slen = 0;
4436 int alen = 0;
4437 int slen = 0;
4212 int held_length = 0;
4213
4214 if (msg_flags) {
4215 in_flags = *msg_flags;
4216 } else {
4217 in_flags = 0;
4218 }
4219 slen = uio->uio_resid;
4220 /* Pull in and set up our int flags */
4221 if (in_flags & MSG_OOB) {
4222 /* Out of band's NOT supported */
4223 return (EOPNOTSUPP);
4224 }
4225 if ((in_flags & MSG_PEEK) && (mp != NULL)) {
4226 return (EINVAL);
4227 }
4228 if ((in_flags & (MSG_DONTWAIT
4229 | MSG_NBIO
4230 )) ||
4231 (so->so_state & SS_NBIO)) {
4457 SCTP_SO_IS_NBIO(so)) {
4232 block_allowed = 0;
4233 }
4234 /* setup the endpoint */
4235 inp = (struct sctp_inpcb *)so->so_pcb;
4236 if (inp == NULL) {
4237 return (EFAULT);
4238 }
4239 rwnd_req = (so->so_rcv.sb_hiwat >> SCTP_RWND_HIWAT_SHIFT);
4240 /* Must be at least a MTU's worth */
4241 if (rwnd_req < SCTP_MIN_RWND)
4242 rwnd_req = SCTP_MIN_RWND;
4243 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
4244#ifdef SCTP_RECV_RWND_LOGGING
4245 sctp_misc_ints(SCTP_SORECV_ENTER,
4246 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
4247#endif
4248 SOCKBUF_LOCK(&so->so_rcv);
4249 hold_sblock = 1;
4250#ifdef SCTP_RECV_RWND_LOGGING
4251 sctp_misc_ints(SCTP_SORECV_ENTERPL,
4252 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
4253#endif
4254
4255
4256 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4257 if (error) {
4258 goto release_unlocked;
4259 }
4260restart:
4261 if (hold_sblock == 0) {
4262 SOCKBUF_LOCK(&so->so_rcv);
4263 hold_sblock = 1;
4264 }
4265 sbunlock(&so->so_rcv);
4266
4267restart_nosblocks:
4268 if (hold_sblock == 0) {
4269 SOCKBUF_LOCK(&so->so_rcv);
4270 hold_sblock = 1;
4271 }
4272 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4273 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4274 goto out;
4275 }
4276 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
4277 if (so->so_error) {
4278 error = so->so_error;
4279 if ((in_flags & MSG_PEEK) == 0)
4280 so->so_error = 0;
4281 } else {
4282 error = ENOTCONN;
4283 }
4284 goto out;
4285 }
4286 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
4287 /* we need to wait for data */
4288#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4289 sctp_misc_ints(SCTP_SORECV_BLOCKSA,
4290 0, 0, so->so_rcv.sb_cc, uio->uio_resid);
4291#endif
4292 if ((so->so_rcv.sb_cc == 0) &&
4293 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4294 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
4295 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
4296 /*
4297 * For active open side clear flags for
4298 * re-use passive open is blocked by
4299 * connect.
4300 */
4301 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
4302 /*
4303 * You were aborted, passive side
4304 * always hits here
4305 */
4306 error = ECONNRESET;
4307 /*
4308 * You get this once if you are
4309 * active open side
4310 */
4311 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4312 /*
4313 * Remove flag if on the
4314 * active open side
4315 */
4316 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
4317 }
4318 }
4319 so->so_state &= ~(SS_ISCONNECTING |
4320 SS_ISDISCONNECTING |
4321 SS_ISCONFIRMING |
4322 SS_ISCONNECTED);
4323 if (error == 0) {
4324 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
4325 error = ENOTCONN;
4326 } else {
4327 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
4328 }
4329 }
4330 goto out;
4331 }
4332 }
4333 error = sbwait(&so->so_rcv);
4334 if (error) {
4335 goto out;
4336 }
4337 held_length = 0;
4338 goto restart_nosblocks;
4339 } else if (so->so_rcv.sb_cc == 0) {
4340 if (so->so_error) {
4341 error = so->so_error;
4342 if ((in_flags & MSG_PEEK) == 0)
4343 so->so_error = 0;
4344 } else {
4345 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4346 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4347 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
4348 /*
4349 * For active open side clear flags
4350 * for re-use passive open is
4351 * blocked by connect.
4352 */
4353 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
4354 /*
4355 * You were aborted, passive
4356 * side always hits here
4357 */
4358 error = ECONNRESET;
4359 /*
4360 * You get this once if you
4361 * are active open side
4362 */
4363 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4364 /*
4365 * Remove flag if on
4366 * the active open
4367 * side
4368 */
4369 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
4370 }
4371 }
4372 so->so_state &= ~(SS_ISCONNECTING |
4373 SS_ISDISCONNECTING |
4374 SS_ISCONFIRMING |
4375 SS_ISCONNECTED);
4376 if (error == 0) {
4377 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
4378 error = ENOTCONN;
4379 } else {
4380 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
4381 }
4382 }
4383 goto out;
4384 }
4385 }
4386 error = EWOULDBLOCK;
4387 }
4388 goto out;
4389 }
4390 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4391 /* we possibly have data we can read */
4392 control = TAILQ_FIRST(&inp->read_queue);
4393 if (control == NULL) {
4394 /*
4395 * This could be happening since the appender did the
4396 * increment but has not yet done the tailq insert onto the
4397 * read_queue
4398 */
4399 if (hold_rlock == 0) {
4400 SCTP_INP_READ_LOCK(inp);
4401 hold_rlock = 1;
4402 }
4403 control = TAILQ_FIRST(&inp->read_queue);
4404 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
4405#ifdef INVARIANTS
4406 panic("Huh, its non zero and nothing on control?");
4407#endif
4408 so->so_rcv.sb_cc = 0;
4409 }
4410 SCTP_INP_READ_UNLOCK(inp);
4411 hold_rlock = 0;
4412 goto restart;
4413 }
4414 if ((control->length == 0) &&
4415 (control->do_not_ref_stcb)) {
4416 /*
4417 * Clean up code for freeing assoc that left behind a
4418 * pdapi.. maybe a peer in EEOR that just closed after
4419 * sending and never indicated an EOR.
4420 */
4421 if (hold_rlock == 0) {
4422 hold_rlock = 1;
4423 SCTP_INP_READ_LOCK(inp);
4424 }
4425 control->held_length = 0;
4426 if (control->data) {
4427 /* Hmm there is data here .. fix */
4428 struct mbuf *m;
4429 int cnt = 0;
4430
4431 m = control->data;
4432 while (m) {
4433 cnt += SCTP_BUF_LEN(m);
4434 if (SCTP_BUF_NEXT(m) == NULL) {
4435 control->tail_mbuf = m;
4436 control->end_added = 1;
4437 }
4438 m = SCTP_BUF_NEXT(m);
4439 }
4440 control->length = cnt;
4441 } else {
4442 /* remove it */
4443 TAILQ_REMOVE(&inp->read_queue, control, next);
4444 /* Add back any hidden data */
4445 sctp_free_remote_addr(control->whoFrom);
4446 sctp_free_a_readq(stcb, control);
4447 }
4448 if (hold_rlock) {
4449 hold_rlock = 0;
4450 SCTP_INP_READ_UNLOCK(inp);
4451 }
4452 goto restart;
4453 }
4454 if (control->length == 0) {
4455 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
4456 (filling_sinfo)) {
4457 /* find a more suitable one than this */
4458 ctl = TAILQ_NEXT(control, next);
4459 while (ctl) {
4460 if ((ctl->stcb != control->stcb) && (ctl->length)) {
4461 /* found one */
4462 control = ctl;
4463 goto found_one;
4464 }
4465 ctl = TAILQ_NEXT(ctl, next);
4466 }
4467 }
4468 /*
4469 * if we reach here, no suitable replacement is available
4470 * <or> fragment interleave is NOT on. So stuff the sb_cc
4471 * into our held count, and it's time to sleep again.
4472 */
4473 held_length = so->so_rcv.sb_cc;
4474 control->held_length = so->so_rcv.sb_cc;
4475 goto restart;
4476 }
4477 /* Clear the held length since there is something to read */
4478 control->held_length = 0;
4479 if (hold_rlock) {
4480 SCTP_INP_READ_UNLOCK(inp);
4481 hold_rlock = 0;
4482 }
4483found_one:
4484 /*
4485 * If we reach here, control has some data for us to read off.
4486 * Note that stcb COULD be NULL.
4487 */
4488 if (hold_sblock) {
4489 SOCKBUF_UNLOCK(&so->so_rcv);
4490 hold_sblock = 0;
4491 }
4492 stcb = control->stcb;
4493 if (stcb) {
4494 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
4495 (control->do_not_ref_stcb == 0)) {
4496 if (freecnt_applied == 0)
4497 stcb = NULL;
4498 } else if (control->do_not_ref_stcb == 0) {
4499 /* you can't free it on me please */
4500 /*
4501 * The lock on the socket buffer protects us so the
4502 * free code will stop. But since we used the
4503 * socketbuf lock and the sender uses the tcb_lock
4504 * to increment, we need to use the atomic add to
4505 * the refcnt
4506 */
4507 atomic_add_int(&stcb->asoc.refcnt, 1);
4508 freecnt_applied = 1;
4509 /*
4510 * Setup to remember how much we have not yet told
4511 * the peer our rwnd has opened up. Note we grab the
4512 * value from the tcb from last time. Note too that
4513 * sack sending clears this when a sack is sent..
4514 * which is fine. Once we hit the rwnd_req, we then
4515 * will go to the sctp_user_rcvd() that will not
4516 * lock until it KNOWs it MUST send a WUP-SACK.
4517 *
4518 */
4519 freed_so_far = stcb->freed_by_sorcv_sincelast;
4520 stcb->freed_by_sorcv_sincelast = 0;
4521 }
4522 }
4523 /* First lets get off the sinfo and sockaddr info */
4524 if ((sinfo) && filling_sinfo) {
4525 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
4526 nxt = TAILQ_NEXT(control, next);
4527 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
4528 struct sctp_extrcvinfo *s_extra;
4529
4530 s_extra = (struct sctp_extrcvinfo *)sinfo;
4531 if (nxt) {
4532 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL;
4533 if (nxt->sinfo_flags & SCTP_UNORDERED) {
4534 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
4535 }
4536 if (nxt->spec_flags & M_NOTIFICATION) {
4537 s_extra->next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
4538 }
4539 s_extra->next_asocid = nxt->sinfo_assoc_id;
4540 s_extra->next_length = nxt->length;
4541 s_extra->next_ppid = nxt->sinfo_ppid;
4542 s_extra->next_stream = nxt->sinfo_stream;
4543 if (nxt->tail_mbuf != NULL) {
4544 if (nxt->end_added) {
4545 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
4546 }
4547 }
4548 } else {
4549 /*
4550 * we explicitly 0 this, since the memcpy
4551 * got some other things beyond the older
4552 * sinfo_ that is on the control's structure
4553 * :-D
4554 */
4555 s_extra->next_flags = SCTP_NO_NEXT_MSG;
4556 s_extra->next_asocid = 0;
4557 s_extra->next_length = 0;
4558 s_extra->next_ppid = 0;
4559 s_extra->next_stream = 0;
4560 }
4561 }
4562 /*
4563 * update off the real current cum-ack, if we have an stcb.
4564 */
4565 if (stcb)
4566 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
4567 /*
4568 * mask off the high bits, we keep the actual chunk bits in
4569 * there.
4570 */
4571 sinfo->sinfo_flags &= 0x00ff;
4572 }
4573 if (fromlen && from) {
4574 struct sockaddr *to;
4575
4458 block_allowed = 0;
4459 }
4460 /* setup the endpoint */
4461 inp = (struct sctp_inpcb *)so->so_pcb;
4462 if (inp == NULL) {
4463 return (EFAULT);
4464 }
4465 rwnd_req = (so->so_rcv.sb_hiwat >> SCTP_RWND_HIWAT_SHIFT);
4466 /* Must be at least a MTU's worth */
4467 if (rwnd_req < SCTP_MIN_RWND)
4468 rwnd_req = SCTP_MIN_RWND;
4469 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
4470#ifdef SCTP_RECV_RWND_LOGGING
4471 sctp_misc_ints(SCTP_SORECV_ENTER,
4472 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
4473#endif
4474 SOCKBUF_LOCK(&so->so_rcv);
4475 hold_sblock = 1;
4476#ifdef SCTP_RECV_RWND_LOGGING
4477 sctp_misc_ints(SCTP_SORECV_ENTERPL,
4478 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
4479#endif
4480
4481
4482 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4483 if (error) {
4484 goto release_unlocked;
4485 }
4486restart:
4487 if (hold_sblock == 0) {
4488 SOCKBUF_LOCK(&so->so_rcv);
4489 hold_sblock = 1;
4490 }
4491 sbunlock(&so->so_rcv);
4492
4493restart_nosblocks:
4494 if (hold_sblock == 0) {
4495 SOCKBUF_LOCK(&so->so_rcv);
4496 hold_sblock = 1;
4497 }
4498 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4499 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4500 goto out;
4501 }
4502 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
4503 if (so->so_error) {
4504 error = so->so_error;
4505 if ((in_flags & MSG_PEEK) == 0)
4506 so->so_error = 0;
4507 } else {
4508 error = ENOTCONN;
4509 }
4510 goto out;
4511 }
4512 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
4513 /* we need to wait for data */
4514#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4515 sctp_misc_ints(SCTP_SORECV_BLOCKSA,
4516 0, 0, so->so_rcv.sb_cc, uio->uio_resid);
4517#endif
4518 if ((so->so_rcv.sb_cc == 0) &&
4519 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4520 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
4521 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
4522 /*
4523 * For active open side clear flags for
4524 * re-use passive open is blocked by
4525 * connect.
4526 */
4527 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
4528 /*
4529 * You were aborted, passive side
4530 * always hits here
4531 */
4532 error = ECONNRESET;
4533 /*
4534 * You get this once if you are
4535 * active open side
4536 */
4537 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4538 /*
4539 * Remove flag if on the
4540 * active open side
4541 */
4542 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
4543 }
4544 }
4545 so->so_state &= ~(SS_ISCONNECTING |
4546 SS_ISDISCONNECTING |
4547 SS_ISCONFIRMING |
4548 SS_ISCONNECTED);
4549 if (error == 0) {
4550 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
4551 error = ENOTCONN;
4552 } else {
4553 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
4554 }
4555 }
4556 goto out;
4557 }
4558 }
4559 error = sbwait(&so->so_rcv);
4560 if (error) {
4561 goto out;
4562 }
4563 held_length = 0;
4564 goto restart_nosblocks;
4565 } else if (so->so_rcv.sb_cc == 0) {
4566 if (so->so_error) {
4567 error = so->so_error;
4568 if ((in_flags & MSG_PEEK) == 0)
4569 so->so_error = 0;
4570 } else {
4571 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4572 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4573 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
4574 /*
4575 * For active open side clear flags
4576 * for re-use passive open is
4577 * blocked by connect.
4578 */
4579 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
4580 /*
4581 * You were aborted, passive
4582 * side always hits here
4583 */
4584 error = ECONNRESET;
4585 /*
4586 * You get this once if you
4587 * are active open side
4588 */
4589 if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4590 /*
4591 * Remove flag if on
4592 * the active open
4593 * side
4594 */
4595 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
4596 }
4597 }
4598 so->so_state &= ~(SS_ISCONNECTING |
4599 SS_ISDISCONNECTING |
4600 SS_ISCONFIRMING |
4601 SS_ISCONNECTED);
4602 if (error == 0) {
4603 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
4604 error = ENOTCONN;
4605 } else {
4606 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
4607 }
4608 }
4609 goto out;
4610 }
4611 }
4612 error = EWOULDBLOCK;
4613 }
4614 goto out;
4615 }
4616 error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4617 /* we possibly have data we can read */
4618 control = TAILQ_FIRST(&inp->read_queue);
4619 if (control == NULL) {
4620 /*
4621 * This could be happening since the appender did the
4622 * increment but as not yet did the tailq insert onto the
4623 * read_queue
4624 */
4625 if (hold_rlock == 0) {
4626 SCTP_INP_READ_LOCK(inp);
4627 hold_rlock = 1;
4628 }
4629 control = TAILQ_FIRST(&inp->read_queue);
4630 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
4631#ifdef INVARIANTS
4632 panic("Huh, its non zero and nothing on control?");
4633#endif
4634 so->so_rcv.sb_cc = 0;
4635 }
4636 SCTP_INP_READ_UNLOCK(inp);
4637 hold_rlock = 0;
4638 goto restart;
4639 }
4640 if ((control->length == 0) &&
4641 (control->do_not_ref_stcb)) {
4642 /*
4643 * Clean up code for freeing assoc that left behind a
4644 * pdapi.. maybe a peer in EEOR that just closed after
4645 * sending and never indicated a EOR.
4646 */
4647 if (hold_rlock == 0) {
4648 hold_rlock = 1;
4649 SCTP_INP_READ_LOCK(inp);
4650 }
4651 control->held_length = 0;
4652 if (control->data) {
4653 /* Hmm there is data here .. fix */
4654 struct mbuf *m;
4655 int cnt = 0;
4656
4657 m = control->data;
4658 while (m) {
4659 cnt += SCTP_BUF_LEN(m);
4660 if (SCTP_BUF_NEXT(m) == NULL) {
4661 control->tail_mbuf = m;
4662 control->end_added = 1;
4663 }
4664 m = SCTP_BUF_NEXT(m);
4665 }
4666 control->length = cnt;
4667 } else {
4668 /* remove it */
4669 TAILQ_REMOVE(&inp->read_queue, control, next);
4670 /* Add back any hiddend data */
4671 sctp_free_remote_addr(control->whoFrom);
4672 sctp_free_a_readq(stcb, control);
4673 }
4674 if (hold_rlock) {
4675 hold_rlock = 0;
4676 SCTP_INP_READ_UNLOCK(inp);
4677 }
4678 goto restart;
4679 }
4680 if (control->length == 0) {
4681 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
4682 (filling_sinfo)) {
4683 /* find a more suitable one then this */
4684 ctl = TAILQ_NEXT(control, next);
4685 while (ctl) {
4686 if ((ctl->stcb != control->stcb) && (ctl->length)) {
4687 /* found one */
4688 control = ctl;
4689 goto found_one;
4690 }
4691 ctl = TAILQ_NEXT(ctl, next);
4692 }
4693 }
4694 /*
4695 * if we reach here, not suitable replacement is available
4696 * <or> fragment interleave is NOT on. So stuff the sb_cc
4697 * into the our held count, and its time to sleep again.
4698 */
4699 held_length = so->so_rcv.sb_cc;
4700 control->held_length = so->so_rcv.sb_cc;
4701 goto restart;
4702 }
4703 /* Clear the held length since there is something to read */
4704 control->held_length = 0;
4705 if (hold_rlock) {
4706 SCTP_INP_READ_UNLOCK(inp);
4707 hold_rlock = 0;
4708 }
4709found_one:
4710 /*
4711 * If we reach here, control has a some data for us to read off.
4712 * Note that stcb COULD be NULL.
4713 */
4714 if (hold_sblock) {
4715 SOCKBUF_UNLOCK(&so->so_rcv);
4716 hold_sblock = 0;
4717 }
4718 stcb = control->stcb;
4719 if (stcb) {
4720 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
4721 (control->do_not_ref_stcb == 0)) {
4722 if (freecnt_applied == 0)
4723 stcb = NULL;
4724 } else if (control->do_not_ref_stcb == 0) {
4725 /* you can't free it on me please */
4726 /*
4727 * The lock on the socket buffer protects us so the
4728 * free code will stop. But since we used the
4729 * socketbuf lock and the sender uses the tcb_lock
4730 * to increment, we need to use the atomic add to
4731 * the refcnt
4732 */
4733 atomic_add_int(&stcb->asoc.refcnt, 1);
4734 freecnt_applied = 1;
4735 /*
4736 * Setup to remember how much we have not yet told
4737 * the peer our rwnd has opened up. Note we grab the
4738 * value from the tcb from last time. Note too that
4739 * sack sending clears this when a sack is sent..
4740 * which is fine. Once we hit the rwnd_req, we then
4741 * will go to the sctp_user_rcvd() that will not
4742 * lock until it KNOWs it MUST send a WUP-SACK.
4743 *
4744 */
4745 freed_so_far = stcb->freed_by_sorcv_sincelast;
4746 stcb->freed_by_sorcv_sincelast = 0;
4747 }
4748 }
 4749         /* First let's copy out the sinfo and sockaddr info */
4750 if ((sinfo) && filling_sinfo) {
4751 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
4752 nxt = TAILQ_NEXT(control, next);
4753 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
4754 struct sctp_extrcvinfo *s_extra;
4755
4756 s_extra = (struct sctp_extrcvinfo *)sinfo;
4757 if (nxt) {
4758 s_extra->next_flags = SCTP_NEXT_MSG_AVAIL;
4759 if (nxt->sinfo_flags & SCTP_UNORDERED) {
4760 s_extra->next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
4761 }
4762 if (nxt->spec_flags & M_NOTIFICATION) {
4763 s_extra->next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
4764 }
4765 s_extra->next_asocid = nxt->sinfo_assoc_id;
4766 s_extra->next_length = nxt->length;
4767 s_extra->next_ppid = nxt->sinfo_ppid;
4768 s_extra->next_stream = nxt->sinfo_stream;
4769 if (nxt->tail_mbuf != NULL) {
4770 if (nxt->end_added) {
4771 s_extra->next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
4772 }
4773 }
4774 } else {
4775 /*
 4776                                  * we explicitly zero these, since the memcpy
 4777                                  * picked up other fields beyond the older
 4778                                  * sinfo_ members that are on the control's
 4779                                  * structure :-D
4780 */
4781 s_extra->next_flags = SCTP_NO_NEXT_MSG;
4782 s_extra->next_asocid = 0;
4783 s_extra->next_length = 0;
4784 s_extra->next_ppid = 0;
4785 s_extra->next_stream = 0;
4786 }
4787 }
4788 /*
4789 * update off the real current cum-ack, if we have an stcb.
4790 */
4791 if (stcb)
4792 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
4793 /*
4794 * mask off the high bits, we keep the actual chunk bits in
4795 * there.
4796 */
4797 sinfo->sinfo_flags &= 0x00ff;
4798 }
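	/*
	 * Next, copy the peer's address out to the caller.  An IPv4 peer
	 * address can be handed back in mapped-IPv6 form when the socket
	 * sets SCTP_PCB_FLAGS_NEEDS_MAPPED_V4, and for IPv6 the address
	 * scope is recovered via sctp_recover_scope_mac().
	 */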
4799 if (fromlen && from) {
4800 struct sockaddr *to;
4801
4576#ifdef AF_INET
4802#ifdef INET
4803 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len);
4804 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
4805 ((struct sockaddr_in *)from)->sin_port = control->port_from;
4806#else
4807 /* No AF_INET use AF_INET6 */
4808 cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len);
4809 memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
4810 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
4811#endif
4812
4813 to = from;
4588#if defined(AF_INET) && defined(AF_INET6)
4814#if defined(INET) && defined(INET6)
4815 if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
4816 (to->sa_family == AF_INET) &&
4817 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
4818 struct sockaddr_in *sin;
4819 struct sockaddr_in6 sin6;
4820
4821 sin = (struct sockaddr_in *)to;
4822 bzero(&sin6, sizeof(sin6));
4823 sin6.sin6_family = AF_INET6;
4824 sin6.sin6_len = sizeof(struct sockaddr_in6);
4825 sin6.sin6_addr.s6_addr16[2] = 0xffff;
4826 bcopy(&sin->sin_addr,
4827 &sin6.sin6_addr.s6_addr16[3],
4828 sizeof(sin6.sin6_addr.s6_addr16[3]));
4829 sin6.sin6_port = sin->sin_port;
4830 memcpy(from, (caddr_t)&sin6, sizeof(sin6));
4831 }
4832#endif
4607#if defined(AF_INET6)
4833#if defined(INET6)
4834 {
4835 struct sockaddr_in6 lsa6, *to6;
4836
4837 to6 = (struct sockaddr_in6 *)to;
4838 sctp_recover_scope_mac(to6, (&lsa6));
4839 }
4840#endif
4841 }
4842 /* now copy out what data we can */
4843 if (mp == NULL) {
4844 /* copy out each mbuf in the chain up to length */
4845get_more_data:
4846 m = control->data;
4847 while (m) {
4848 /* Move out all we can */
4849 cp_len = (int)uio->uio_resid;
4850 my_len = (int)SCTP_BUF_LEN(m);
4851 if (cp_len > my_len) {
4852 /* not enough in this buf */
4853 cp_len = my_len;
4854 }
4855 if (hold_rlock) {
4856 SCTP_INP_READ_UNLOCK(inp);
4857 hold_rlock = 0;
4858 }
4859 if (cp_len > 0)
4860 error = uiomove(mtod(m, char *), cp_len, uio);
4861#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4862 sctp_misc_ints(SCTP_SORCV_DOESCPY,
4863 so->so_rcv.sb_cc,
4864 cp_len,
4865 0,
4866 0);
4867#endif
4868 /* re-read */
4869 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4870 goto release;
4871 }
4872 if (stcb &&
4873 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4874 no_rcv_needed = 1;
4875 }
4876 if (error) {
4877 /* error we are out of here */
4878 goto release;
4879 }
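			/*
			 * If this pass consumed the final mbuf of the
			 * message, and either the sender has not yet added
			 * the end or this is the last control on the read
			 * queue, take the read lock so an appender cannot
			 * race the free/tail update done below.
			 */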
4880 if ((SCTP_BUF_NEXT(m) == NULL) &&
4881 (cp_len >= SCTP_BUF_LEN(m)) &&
4882 ((control->end_added == 0) ||
4883 (control->end_added && (TAILQ_NEXT(control, next) == NULL)))
4884 ) {
4885#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4886 sctp_misc_ints(SCTP_SORCV_DOESLCK,
4887 so->so_rcv.sb_cc,
4888 cp_len,
4889 SCTP_BUF_LEN(m),
4890 control->length);
4891#endif
4892 SCTP_INP_READ_LOCK(inp);
4893 hold_rlock = 1;
4894 }
4895 if (cp_len == SCTP_BUF_LEN(m)) {
4896#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4897 sctp_misc_ints(SCTP_SORCV_DOESADJ,
4898 so->so_rcv.sb_cc,
4899 control->length,
4900 cp_len,
4901 0);
4902#endif
4903 if ((SCTP_BUF_NEXT(m) == NULL) &&
4904 (control->end_added)) {
4905 out_flags |= MSG_EOR;
4906 }
4907 if (control->spec_flags & M_NOTIFICATION) {
4908 out_flags |= MSG_NOTIFICATION;
4909 }
4910 /* we ate up the mbuf */
4911 if (in_flags & MSG_PEEK) {
4912 /* just looking */
4913 m = SCTP_BUF_NEXT(m);
4914 copied_so_far += cp_len;
4915 } else {
4916 /* dispose of the mbuf */
4917#ifdef SCTP_SB_LOGGING
4918 sctp_sblog(&so->so_rcv,
4919 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4920#endif
4921 sctp_sbfree(control, stcb, &so->so_rcv, m);
4922#ifdef SCTP_SB_LOGGING
4923 sctp_sblog(&so->so_rcv,
4924 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4925#endif
4926 embuf = m;
4927 copied_so_far += cp_len;
4928 freed_so_far += cp_len;
4929 alen = atomic_fetchadd_int(&control->length, -(cp_len));
4930 if (alen < cp_len) {
4931 panic("Control length goes negative?");
4932 }
4933#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4934 sctp_misc_ints(SCTP_SORCV_PASSBF,
4935 so->so_rcv.sb_cc,
4936 control->length,
4937 0,
4938 0);
4939#endif
4940 control->data = sctp_m_free(m);
4941 m = control->data;
4942 /*
 4943                                          * been through it all; we must hold the sb
 4944                                          * lock, so it is ok to null the tail
4945 */
4946 if (control->data == NULL) {
4947#ifdef INVARIANTS
4948 if ((control->end_added == 0) ||
4949 (TAILQ_NEXT(control, next) == NULL)) {
4950 /*
4951 * If the end is not
4952 * added, OR the
 4953                                                  * next is NULL,
4954 * we MUST have the
4955 * lock.
4956 */
4957 if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
4958 panic("Hmm we don't own the lock?");
4959 }
4960 }
4961#endif
4962 control->tail_mbuf = NULL;
4963#ifdef INVARIANTS
4964 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
4965 panic("end_added, nothing left and no MSG_EOR");
4966 }
4967#endif
4968 }
4969#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
4970 sctp_misc_ints(SCTP_SORCV_ADJD,
4971 so->so_rcv.sb_cc,
4972 control->length,
4973 0,
4974 0);
4975#endif
4976 }
4977 } else {
4978 /* Do we need to trim the mbuf? */
4979 if (control->spec_flags & M_NOTIFICATION) {
4980 out_flags |= MSG_NOTIFICATION;
4981 }
4982 if ((in_flags & MSG_PEEK) == 0) {
4983 SCTP_BUF_RESV_UF(m, cp_len);
4984 SCTP_BUF_LEN(m) -= cp_len;
4985#ifdef SCTP_SB_LOGGING
4986 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
4987#endif
4988 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
4989 if (stcb) {
4990 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
4991 }
4992 copied_so_far += cp_len;
4993 embuf = m;
4994 freed_so_far += cp_len;
4995#ifdef SCTP_SB_LOGGING
4996 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
4997 SCTP_LOG_SBRESULT, 0);
4998#endif
4999 alen = atomic_fetchadd_int(&control->length, -(cp_len));
5000 if (alen < cp_len) {
5001 panic("Control length goes negative2?");
5002 }
5003 } else {
5004 copied_so_far += cp_len;
5005 }
5006 }
5007 if ((out_flags & MSG_EOR) ||
5008 (uio->uio_resid == 0)
5009 ) {
5010 break;
5011 }
5012 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5013 (control->do_not_ref_stcb == 0) &&
5014 (freed_so_far >= rwnd_req)) {
5015 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5016 }
5017#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5018 sctp_misc_ints(SCTP_SORCV_BOTWHILE,
5019 so->so_rcv.sb_cc,
5020 control->length,
5021 0,
5022 0);
5023#endif
5024
5025 } /* end while(m) */
5026 /*
5027 * At this point we have looked at it all and we either have
 5028                  * a MSG_EOR, or read all the user wants... <OR>
5029 * control->length == 0.
5030 */
5031 if ((out_flags & MSG_EOR) &&
5032 ((in_flags & MSG_PEEK) == 0)) {
5033 /* we are done with this control */
5034 if (control->length == 0) {
5035 if (control->data) {
5036#ifdef INVARIANTS
5037 panic("control->data not null at read eor?");
5038#else
 5039                                         printf("Strange, data left in the control buffer... invariants would panic?\n");
5040 sctp_m_freem(control->data);
5041 control->data = NULL;
5042#endif
5043 }
5044 done_with_control:
5045#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5046 sctp_misc_ints(SCTP_SORCV_FREECTL,
5047 so->so_rcv.sb_cc,
5048 0,
5049 0,
5050 0);
5051#endif
5052 if (TAILQ_NEXT(control, next) == NULL) {
5053 /*
5054 * If we don't have a next we need a
 5055                                  * lock; if there is a next, an interrupt
5056 * is filling ahead of us and we
5057 * don't need a lock to remove this
5058 * guy (which is the head of the
5059 * queue).
5060 */
5061 if (hold_rlock == 0) {
5062 SCTP_INP_READ_LOCK(inp);
5063 hold_rlock = 1;
5064 }
5065 }
5066 TAILQ_REMOVE(&inp->read_queue, control, next);
 5067                         /* Add back any hidden data */
5068 if (control->held_length) {
5069 held_length = 0;
5070 control->held_length = 0;
5071 wakeup_read_socket = 1;
5072 }
5073 no_rcv_needed = control->do_not_ref_stcb;
5074 sctp_free_remote_addr(control->whoFrom);
5075 control->data = NULL;
5076 sctp_free_a_readq(stcb, control);
5077 control = NULL;
5078 if ((freed_so_far >= rwnd_req) && (no_rcv_needed == 0))
5079 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5080
5081 } else {
5082 /*
5083 * The user did not read all of this
5084 * message, turn off the returned MSG_EOR
5085 * since we are leaving more behind on the
5086 * control to read.
5087 */
5088#ifdef INVARIANTS
5089 if (control->end_added && (control->data == NULL) &&
5090 (control->tail_mbuf == NULL)) {
5091 panic("Gak, control->length is corrupt?");
5092 }
5093#endif
5094 no_rcv_needed = control->do_not_ref_stcb;
5095 out_flags &= ~MSG_EOR;
5096 }
5097 }
5098 if (out_flags & MSG_EOR) {
5099 goto release;
5100 }
5101 if ((uio->uio_resid == 0) ||
5102 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5103 ) {
5104 goto release;
5105 }
5106 /*
 5107                  * If we hit here the receiver wants more and this message is
 5108                  * NOT done (pd-api). So two questions: can we block? If not,
 5109                  * we are done. Did the user NOT set MSG_WAITALL?
5110 */
5111 if (block_allowed == 0) {
5112 goto release;
5113 }
5114 /*
 5115                  * We need to wait for more data. A few things: - We don't
 5116                  * sbunlock() so we don't get someone else reading. - We
 5117                  * must be sure to account for the case where what is added
 5118                  * is NOT for our control when we wake up.
5119 */
5120
5121 /*
5122 * Do we need to tell the transport a rwnd update might be
5123 * needed before we go to sleep?
5124 */
5125 if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5126 ((freed_so_far >= rwnd_req) &&
5127 (control->do_not_ref_stcb == 0) &&
5128 (no_rcv_needed == 0))) {
5129 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5130 }
5131wait_some_more:
5132 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5133 goto release;
5134 }
5135 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5136 goto release;
5137
5138 if (hold_rlock == 1) {
5139 SCTP_INP_READ_UNLOCK(inp);
5140 hold_rlock = 0;
5141 }
5142 if (hold_sblock == 0) {
5143 SOCKBUF_LOCK(&so->so_rcv);
5144 hold_sblock = 1;
5145 }
5146#ifdef SCTP_RECV_DETAIL_RWND_LOGGING
5147 if (stcb)
5148 sctp_misc_ints(SCTP_SORECV_BLOCKSB,
5149 freed_so_far,
5150 stcb->asoc.my_rwnd,
5151 so->so_rcv.sb_cc,
5152 uio->uio_resid);
5153 else
5154 sctp_misc_ints(SCTP_SORECV_BLOCKSB,
5155 freed_so_far,
5156 0,
5157 so->so_rcv.sb_cc,
5158 uio->uio_resid);
5159#endif
5160 if (so->so_rcv.sb_cc <= control->held_length) {
5161 error = sbwait(&so->so_rcv);
5162 if (error) {
5163 goto release;
5164 }
5165 control->held_length = 0;
5166 }
5167 if (hold_sblock) {
5168 SOCKBUF_UNLOCK(&so->so_rcv);
5169 hold_sblock = 0;
5170 }
5171 if (control->length == 0) {
5172 /* still nothing here */
5173 if (control->end_added == 1) {
 5174                         /* the peer aborted, or is done, i.e., did a shutdown */
5175 out_flags |= MSG_EOR;
5176 if (control->pdapi_aborted)
5177 out_flags |= MSG_TRUNC;
5178 goto done_with_control;
5179 }
5180 if (so->so_rcv.sb_cc > held_length) {
5181 control->held_length = so->so_rcv.sb_cc;
5182 held_length = 0;
5183 }
5184 goto wait_some_more;
5185 } else if (control->data == NULL) {
5186 /*
5187 * we must re-sync since data is probably being
5188 * added
5189 */
5190 SCTP_INP_READ_LOCK(inp);
5191 if ((control->length > 0) && (control->data == NULL)) {
5192 /*
 5193                          * big trouble... we have the lock and it's
5194 * corrupt?
5195 */
5196 panic("Impossible data==NULL length !=0");
5197 }
5198 SCTP_INP_READ_UNLOCK(inp);
5199 /* We will fall around to get more data */
5200 }
5201 goto get_more_data;
5202 } else {
5203 /* copy out the mbuf chain */
5204get_more_data2:
5205 /*
 5206                  * Do we have a uio? I doubt it. If so we grab the size from
 5207                  * it; if not you get it all.
5208 */
5209 if (uio)
5210 cp_len = uio->uio_resid;
5211 else
5212 cp_len = control->length;
5213
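		/*
		 * Two paths below: the "easy way" hands the whole mbuf chain
		 * over when the caller wants at least the full message; the
		 * "hard way" walks the chain mbuf by mbuf when only part of
		 * it fits.
		 */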
5214 if ((uint32_t) cp_len >= control->length) {
5215 /* easy way */
5216 if ((control->end_added == 0) ||
5217 (TAILQ_NEXT(control, next) == NULL)) {
5218 /* Need to get rlock */
5219 if (hold_rlock == 0) {
5220 SCTP_INP_READ_LOCK(inp);
5221 hold_rlock = 1;
5222 }
5223 }
5224 if (control->end_added) {
5225 out_flags |= MSG_EOR;
5226 }
5227 if (control->spec_flags & M_NOTIFICATION) {
5228 out_flags |= MSG_NOTIFICATION;
5229 }
5230 if (uio)
5231 uio->uio_resid -= control->length;
5232 *mp = control->data;
5233 m = control->data;
5234 while (m) {
5235#ifdef SCTP_SB_LOGGING
5236 sctp_sblog(&so->so_rcv,
5237 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5238#endif
5239 sctp_sbfree(control, stcb, &so->so_rcv, m);
5240 freed_so_far += SCTP_BUF_LEN(m);
5241#ifdef SCTP_SB_LOGGING
5242 sctp_sblog(&so->so_rcv,
5243 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5244#endif
5245 m = SCTP_BUF_NEXT(m);
5246 }
5247 control->data = control->tail_mbuf = NULL;
5248 control->length = 0;
5249 if (out_flags & MSG_EOR) {
5250 /* Done with this control */
5251 goto done_with_control;
5252 }
 5253                         /* still more to do with this control */
5254 /* do we really support msg_waitall here? */
5255 if ((block_allowed == 0) ||
5256 ((in_flags & MSG_WAITALL) == 0)) {
5257 goto release;
5258 }
5259 wait_some_more2:
5260 if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
5261 goto release;
5262 if (hold_rlock == 1) {
5263 SCTP_INP_READ_UNLOCK(inp);
5264 hold_rlock = 0;
5265 }
5266 if (hold_sblock == 0) {
5267 SOCKBUF_LOCK(&so->so_rcv);
5268 hold_sblock = 1;
5269 }
5270 if (so->so_rcv.sb_cc <= control->held_length) {
5271 error = sbwait(&so->so_rcv);
5272 if (error) {
5273 goto release;
5274 }
5275 }
5276 if (hold_sblock) {
5277 SOCKBUF_UNLOCK(&so->so_rcv);
5278 hold_sblock = 0;
5279 }
5280 if (control->length == 0) {
5281 /* still nothing here */
5282 if (control->end_added == 1) {
5283 /*
 5284                                          * the peer aborted, or is done,
 5285                                          * i.e., did a shutdown
5286 */
5287 out_flags |= MSG_EOR;
5288 if (control->pdapi_aborted)
5289 out_flags |= MSG_TRUNC;
5290 goto done_with_control;
5291 }
5292 if (so->so_rcv.sb_cc > held_length) {
5293 control->held_length = so->so_rcv.sb_cc;
5294 /*
5295 * We don't use held_length while
5296 * getting a message
5297 */
5298 held_length = 0;
5299 }
5300 goto wait_some_more2;
5301 }
5302 goto get_more_data2;
5303 } else {
5304 /* hard way mbuf by mbuf */
5305 m = control->data;
5306 if (control->end_added == 0) {
5307 /* need the rlock */
5308 if (hold_rlock == 0) {
5309 SCTP_INP_READ_LOCK(inp);
5310 hold_rlock = 1;
5311 }
5312 }
5313 if (control->spec_flags & M_NOTIFICATION) {
5314 out_flags |= MSG_NOTIFICATION;
5315 }
5316 while ((m) && (cp_len > 0)) {
5317 if (cp_len >= SCTP_BUF_LEN(m)) {
5318 *mp = m;
5319 atomic_subtract_int(&control->length, SCTP_BUF_LEN(m));
5320 if (uio)
5321 uio->uio_resid -= SCTP_BUF_LEN(m);
5322 cp_len -= SCTP_BUF_LEN(m);
5323 control->data = SCTP_BUF_NEXT(m);
5324 SCTP_BUF_NEXT(m) = NULL;
5325#ifdef SCTP_SB_LOGGING
5326 sctp_sblog(&so->so_rcv,
5327 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5328#endif
5329 sctp_sbfree(control, stcb, &so->so_rcv, m);
5330 freed_so_far += SCTP_BUF_LEN(m);
5331#ifdef SCTP_SB_LOGGING
5332 sctp_sblog(&so->so_rcv,
5333 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5334#endif
5335 mp = &SCTP_BUF_NEXT(m);
5336 m = control->data;
5337 } else {
5338 /*
 5339                                          * got all the caller wants and it's part of
5340 * this mbuf only.
5341 */
5342 if (uio)
5343 uio->uio_resid -= SCTP_BUF_LEN(m);
5344 cp_len -= SCTP_BUF_LEN(m);
5345 if (hold_rlock) {
5346 SCTP_INP_READ_UNLOCK(inp);
5347 hold_rlock = 0;
5348 }
5349 if (hold_sblock) {
5350 SOCKBUF_UNLOCK(&so->so_rcv);
5351 hold_sblock = 0;
5352 }
5353 *mp = SCTP_M_COPYM(m, 0, cp_len,
5354 M_TRYWAIT
5355 );
5356#ifdef SCTP_LOCK_LOGGING
5357 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCKBUF_R);
5358#endif
5359 if (hold_sblock == 0) {
5360 SOCKBUF_LOCK(&so->so_rcv);
5361 hold_sblock = 1;
5362 }
5363 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5364 goto release;
5365
5366 if (stcb &&
5367 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5368 no_rcv_needed = 1;
5369 }
5370 SCTP_BUF_RESV_UF(m, cp_len);
5371 SCTP_BUF_LEN(m) -= cp_len;
5372#ifdef SCTP_SB_LOGGING
5373 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5374#endif
5375 freed_so_far += cp_len;
5376 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5377 if (stcb) {
5378 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5379 if ((freed_so_far >= rwnd_req) &&
5380 (control->do_not_ref_stcb == 0) &&
5381 (no_rcv_needed == 0))
5382 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5383 }
5384#ifdef SCTP_SB_LOGGING
5385 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5386 SCTP_LOG_SBRESULT, 0);
5387#endif
5388 goto release;
5389 }
5390 }
5391 }
5392 }
5393release:
5394 if (hold_rlock == 1) {
5395 SCTP_INP_READ_UNLOCK(inp);
5396 hold_rlock = 0;
5397 }
5398 if (hold_sblock == 0) {
5399 SOCKBUF_LOCK(&so->so_rcv);
5400 hold_sblock = 1;
5401 }
5402 sbunlock(&so->so_rcv);
5403
5404release_unlocked:
5405 if (hold_sblock) {
5406 SOCKBUF_UNLOCK(&so->so_rcv);
5407 hold_sblock = 0;
5408 }
5409 if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5410 if ((freed_so_far >= rwnd_req) &&
5411 (control && (control->do_not_ref_stcb == 0)) &&
5412 (no_rcv_needed == 0))
5413 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5414 }
5415 if (msg_flags)
5416 *msg_flags |= out_flags;
5417out:
5418 if (hold_rlock == 1) {
5419 SCTP_INP_READ_UNLOCK(inp);
5420 hold_rlock = 0;
5421 }
5422 if (hold_sblock) {
5423 SOCKBUF_UNLOCK(&so->so_rcv);
5424 hold_sblock = 0;
5425 }
5426 if (freecnt_applied) {
5427 /*
5428 * The lock on the socket buffer protects us so the free
5429 * code will stop. But since we used the socketbuf lock and
5430 * the sender uses the tcb_lock to increment, we need to use
5431 * the atomic add to the refcnt.
5432 */
5433 if (stcb == NULL) {
5434 panic("stcb for refcnt has gone NULL?");
5435 }
5436 atomic_add_int(&stcb->asoc.refcnt, -1);
5437 freecnt_applied = 0;
5438 /* Save the value back for next time */
5439 stcb->freed_by_sorcv_sincelast = freed_so_far;
5440 }
5441#ifdef SCTP_RECV_RWND_LOGGING
5442 if (stcb) {
5443 sctp_misc_ints(SCTP_SORECV_DONE,
5444 freed_so_far,
5445 ((uio) ? (slen - uio->uio_resid) : slen),
5446 stcb->asoc.my_rwnd,
5447 so->so_rcv.sb_cc);
5448 } else {
5449 sctp_misc_ints(SCTP_SORECV_DONE,
5450 freed_so_far,
5451 ((uio) ? (slen - uio->uio_resid) : slen),
5452 0,
5453 so->so_rcv.sb_cc);
5454 }
5455#endif
5456 if (wakeup_read_socket) {
5457 sctp_sorwakeup(inp, so);
5458 }
5459 return (error);
5460}
5461
5462
5463#ifdef SCTP_MBUF_LOGGING
5464struct mbuf *
5465sctp_m_free(struct mbuf *m)
5466{
5467 if (SCTP_BUF_IS_EXTENDED(m)) {
5468 sctp_log_mb(m, SCTP_MBUF_IFREE);
5469 }
5470 return (m_free(m));
5471}
5472
5473void
5474sctp_m_freem(struct mbuf *mb)
5475{
5476 while (mb != NULL)
5477 mb = sctp_m_free(mb);
5478}
5479
5480#endif
5481
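/*
 * Note: rather than walking the associations directly, the routine below
 * queues an SCTP_SET_PRIM_ADDR work item on the address work queue and
 * arms the ADDR_WQ timer, so the iterator performs the peer-set-primary
 * requests asynchronously.
 */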
5482int
5483sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5484{
5485 /*
 5486  * Given a local address, for all associations that hold the
5487 * address, request a peer-set-primary.
5488 */
5489 struct sctp_ifa *ifa;
5490 struct sctp_laddr *wi;
5491
5492 ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5493 if (ifa == NULL) {
5494 return (EADDRNOTAVAIL);
5495 }
5496 /*
5497 * Now that we have the ifa we must awaken the iterator with this
5498 * message.
5499 */
5500 wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
5501 if (wi == NULL) {
5502 return (ENOMEM);
5503 }
5504 /* Now incr the count and int wi structure */
5505 SCTP_INCR_LADDR_COUNT();
5506 bzero(wi, sizeof(*wi));
5507 wi->ifa = ifa;
5508 wi->action = SCTP_SET_PRIM_ADDR;
5509 atomic_add_int(&ifa->refcount, 1);
5510
5511 /* Now add it to the work queue */
5512 SCTP_IPI_ITERATOR_WQ_LOCK();
5513 /*
5514 * Should this really be a tailq? As it is we will process the
5515 * newest first :-0
5516 */
5517 LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
5518 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
5519 (struct sctp_inpcb *)NULL,
5520 (struct sctp_tcb *)NULL,
5521 (struct sctp_nets *)NULL);
5522 SCTP_IPI_ITERATOR_WQ_UNLOCK();
5523 return (0);
5524}
5525
5526
5527
5528
5529int
5530sctp_soreceive(so, psa, uio, mp0, controlp, flagsp)
5531 struct socket *so;
5532 struct sockaddr **psa;
5533 struct uio *uio;
5534 struct mbuf **mp0;
5535 struct mbuf **controlp;
5536 int *flagsp;
5537{
5538 int error, fromlen;
5539 uint8_t sockbuf[256];
5540 struct sockaddr *from;
5541 struct sctp_extrcvinfo sinfo;
5542 int filling_sinfo = 1;
5543 struct sctp_inpcb *inp;
5544
5545 inp = (struct sctp_inpcb *)so->so_pcb;
 5546         /* pick up the assoc we are reading from */
5547 if (inp == NULL) {
5548 return (EINVAL);
5549 }
5550 if ((sctp_is_feature_off(inp,
5551 SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5552 (controlp == NULL)) {
5553 /* user does not want the sndrcv ctl */
5554 filling_sinfo = 0;
5555 }
5556 if (psa) {
5557 from = (struct sockaddr *)sockbuf;
5558 fromlen = sizeof(sockbuf);
5559 from->sa_len = 0;
5560 } else {
5561 from = NULL;
5562 fromlen = 0;
5563 }
5564
5565 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
5566 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
5567 if ((controlp) && (filling_sinfo)) {
5568 /* copy back the sinfo in a CMSG format */
5569 if (filling_sinfo)
5570 *controlp = sctp_build_ctl_nchunk(inp,
5571 (struct sctp_sndrcvinfo *)&sinfo);
5572 else
5573 *controlp = NULL;
5574 }
5575 if (psa) {
5576 /* copy back the address info */
5577 if (from && from->sa_len) {
5578 *psa = sodupsockaddr(from, M_NOWAIT);
5579 } else {
5580 *psa = NULL;
5581 }
5582 }
5583 return (error);
5584}
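
/*
 * Note on sctp_soreceive(): when SCTP_PCB_FLAGS_RECVDATAIOEVNT is enabled
 * and a control-mbuf pointer is supplied, the sctp_sndrcvinfo gathered by
 * sctp_sorecvmsg() is returned as ancillary data via
 * sctp_build_ctl_nchunk(); otherwise filling_sinfo is cleared and no cmsg
 * is built.
 */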