/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */

#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 179783 2008-06-14 07:58:05Z rrs $");
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 180387 2008-07-09 16:45:30Z rrs $");

#define _IP_VHL
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>



void
sctp_early_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk, *tp2;
	struct timeval now, min_wait, tv;
	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;

	/* an early FR is occurring. */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	if (net->lastsa == 0) {
		/* Hmm no rtt estimate yet? */
		cur_rtt = stcb->asoc.initial_rto >> 2;
	} else {

		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
	}
	if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
		cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
	}
	cur_rtt *= 1000;
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
		if (chk->whoTo != net) {
			continue;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND)
			cnt_resend++;
		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
			/* pending, may need retran */
			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				continue;
			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) {
				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
				    4, SCTP_FR_MARKED_EARLY);
			}
			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
			chk->sent = SCTP_DATAGRAM_RESEND;
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			/* double book size since we are doing an early FR */
			chk->book_size_scale++;
			cnt += chk->send_size;
			if ((cnt + net->flight_size) > net->cwnd) {
				/* Mark all we could possibly resend */
				break;
			}
		}
	}
	if (cnt) {
		/*
		 * JRS - Use the congestion control given in the congestion
		 * control module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net);
	} else if (cnt_resend) {
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	}
	/* Restart it? */
	if (net->flight_size < net->cwnd) {
		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
	}
}
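
/*
 * A minimal sketch (plain userland C, kept out of the build with #if 0) of
 * the marking window computed in sctp_early_fr_timer() above: derive the
 * smoothed RTT, clamp it to the early-FR floor, and subtract it from "now"
 * to get the oldest send time still too recent to mark. The lastsa/lastsv
 * values and the sctp_early_fr_msec setting are assumed samples, not values
 * taken from a real association.
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stdio.h>

int
main(void)
{
	unsigned int lastsa = 800, lastsv = 200;	/* hypothetical values */
	unsigned int early_fr_msec = 250;	/* assumed sysctl setting */
	unsigned int cur_rtt;
	long sec = 1000, usec = 100000;	/* pretend "now" */

	cur_rtt = ((lastsa >> 2) + lastsv) >> 1;	/* same formula as above */
	if (cur_rtt < early_fr_msec)
		cur_rtt = early_fr_msec;
	cur_rtt *= 1000;	/* msec -> usec */
	sec -= cur_rtt / 1000000;
	usec -= cur_rtt % 1000000;
	if (usec < 0) {		/* borrow, as timevalsub() would */
		usec += 1000000;
		sec--;
	}
	printf("min_wait = %ld.%06ld\n", sec, usec);
	return (0);
}
#endif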

void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
}

int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
		SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
		    net, net->error_count,
		    net->failure_threshold);
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
				if (net == stcb->asoc.primary_destination) {
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				/*
				 * JRS 5/14/07 - If a destination is
				 * unreachable, the PF bit is turned off.
				 * This allows an unambiguous use of the PF
				 * bit for destinations that are reachable
				 * but potentially failed. If the
				 * destination is set to the unreachable
				 * state, also set the destination to the PF
				 * state.
				 */
				/*
				 * Add debug message here if destination is
				 * not in PF state.
				 */
				/* Stop any running T3 timers here? */
				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
					net->dest_state &= ~SCTP_ADDR_PF;
					SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
					    net);
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net, SCTP_SO_NOT_LOCKED);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_INCR,
				    stcb->asoc.overall_error_count,
				    (stcb->asoc.overall_error_count + 1),
				    SCTP_FROM_SCTP_TIMER,
				    __LINE__);
			}
			stcb->asoc.overall_error_count++;
		}
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_INCR,
			    stcb->asoc.overall_error_count,
			    (stcb->asoc.overall_error_count + 1),
			    SCTP_FROM_SCTP_TIMER,
			    __LINE__);
		}
		stcb->asoc.overall_error_count++;
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
	    &stcb->asoc, stcb->asoc.overall_error_count,
	    (uint32_t) threshold,
	    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;

		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    sizeof(uint32_t);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
		}
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
		printf("Aborting association threshold:%d overall error count:%d\n",
		    threshold,
		    stcb->asoc.overall_error_count);
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}
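
/*
 * A minimal sketch (plain userland C, kept out of the build with #if 0,
 * using an assumed threshold value) of the policy noted above: the strict
 * '>' comparison deliberately grants the association one last attempt at
 * errors == threshold before it is aborted.
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stdio.h>

int
main(void)
{
	unsigned int threshold = 5;	/* e.g. asoc.max_send_times, assumed */
	unsigned int errors;

	for (errors = 4; errors <= 6; errors++)
		printf("errors=%u -> %s\n", errors,
		    (errors > threshold) ? "abort" : "keep trying");
	return (0);
}
#endif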

struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int mode)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
	int once;

	/* JRS 5/14/07 - Initialize min_errors to an impossible value. */
	int min_errors = -1;
	uint32_t max_cwnd = 0;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	/*
	 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate
	 * net algorithm. This algorithm chooses the active destination (not
	 * in PF state) with the largest cwnd value. If all destinations are
	 * in PF state, unreachable, or unconfirmed, choose the destination
	 * that is in PF state with the lowest error count. In case of a
	 * tie, choose the destination that was most recently active.
	 */
	if (mode == 2) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			/*
			 * JRS 5/14/07 - If the destination is unreachable
			 * or unconfirmed, skip it.
			 */
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				continue;
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable
			 * but in PF state, compare the error count of the
			 * destination to the minimum error count seen thus
			 * far. Store the destination with the lower error
			 * count. If the error counts are equal, store the
			 * destination that was most recently active.
			 */
			if (mnet->dest_state & SCTP_ADDR_PF) {
				/*
				 * JRS 5/14/07 - If the destination under
				 * consideration is the current destination,
				 * work as if the error count is one higher.
				 * The actual error count will not be
				 * incremented until later in the t3
				 * handler.
				 */
				if (mnet == net) {
					if (min_errors == -1) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 < min_errors) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count + 1;
					}
					continue;
				} else {
					if (min_errors == -1) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count < min_errors) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count;
					}
					continue;
				}
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable and
			 * not in PF state, compare the cwnd of the
			 * destination to the highest cwnd seen thus far.
			 * Store the destination with the higher cwnd value.
			 * If the cwnd values are equal, randomly choose one
			 * of the two destinations.
			 */
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2 == 1) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;	/* Useless? */
				}
			}
		}
		/*
		 * JRS 5/14/07 - After all destinations have been considered
		 * as alternates, check to see if there was some active
		 * destination (not in PF state). If not, check to see if
		 * there was some PF destination with the minimum number of
		 * errors. If not, return the original destination. If
		 * there is a min_errors_net, remove the PF flag from that
		 * destination, set the cwnd to one or two MTUs, and return
		 * the destination as an alt. If there was some active
		 * destination with the highest cwnd, return that
		 * destination as an alt.
		 */
		if (max_cwnd_net == NULL) {
			if (min_errors_net == NULL) {
				return (net);
			}
			min_errors_net->dest_state &= ~SCTP_ADDR_PF;
			min_errors_net->cwnd = min_errors_net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
			if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, min_errors_net,
				    SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
			}
			SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n",
			    min_errors_net, min_errors_net->error_count);
			return (min_errors_net);
		} else {
			return (max_cwnd_net);
		}
	}
	/*
	 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for
	 * choosing an alternate net.
	 */
	else if (mode == 1) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)
			    ) {
				/*
				 * will skip ones that are not-reachable or
				 * unconfirmed
				 */
				continue;
			}
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval,
					    sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx = 0;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;
				}
			}
		}
		if (max_cwnd_net) {
			return (max_cwnd_net);
		}
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
	}
	do {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
		if (alt->ro.ro_rt == NULL) {
			if (alt->ro._s_addr) {
				sctp_free_ifa(alt->ro._s_addr);
				alt->ro._s_addr = NULL;
			}
			alt->src_addr_selected = 0;
		}
		if (
		    ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
		    (alt->ro.ro_rt != NULL) &&
		    /* sa_ignore NO_NULL_CHK */
		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
		    ) {
			/* Found a reachable address */
			break;
		}
		mnet = alt;
	} while (alt != NULL);

	if (alt == NULL) {
		/* Case where NO in-service network exists (dormant state) */
		/* we rotate destinations */
		once = 0;
		mnet = net;
		do {
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
			}
			/* sa_ignore NO_NULL_CHK */
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		} while (alt != NULL);
	}
	if (alt == NULL) {
		return (net);
	}
	return (alt);
}
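
/*
 * A simplified, self-contained sketch (plain userland C, kept out of the
 * build with #if 0) of the mode-2 preference order implemented above:
 * prefer the active (reachable, confirmed, non-PF) destination with the
 * largest cwnd, falling back to the lowest-error PF destination. The
 * struct and its fields are invented stand-ins for sctp_nets, and the
 * last-active and random tie-breaks are omitted for brevity.
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stddef.h>
#include <stdio.h>

struct fake_net {			/* hypothetical stand-in for sctp_nets */
	int reachable, confirmed, pf;
	unsigned int cwnd, errors;
};

static struct fake_net *
pick_alternate(struct fake_net *nets, size_t n)
{
	struct fake_net *best_active = NULL, *best_pf = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		struct fake_net *m = &nets[i];

		if (!m->reachable || !m->confirmed)
			continue;
		if (m->pf) {		/* possibly-failed: track min errors */
			if (best_pf == NULL || m->errors < best_pf->errors)
				best_pf = m;
		} else {		/* active: track max cwnd */
			if (best_active == NULL || m->cwnd > best_active->cwnd)
				best_active = m;
		}
	}
	return (best_active != NULL) ? best_active : best_pf;
}

int
main(void)
{
	struct fake_net nets[3] = {
		{1, 1, 1, 4380, 2},	/* PF, 2 errors */
		{1, 1, 0, 2920, 0},	/* active, smaller cwnd */
		{1, 1, 0, 5840, 0},	/* active, largest cwnd -> chosen */
	};

	printf("picked cwnd=%u\n", pick_alternate(nets, 3)->cwnd);
	return (0);
}
#endif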



static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked)
{
	if (net->RTO == 0) {
		net->RTO = stcb->asoc.minrto;
	}
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
	if ((win_probe == 0) && num_marked) {
		/* We don't apply penalty to window probe scenarios */
		/* JRS - Use the congestion control given in the CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
	}
}
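
/*
 * The backoff above is the classic RFC 4960 exponential doubling, clamped
 * to [minrto, maxrto]. A minimal sketch of the resulting sequence (plain
 * userland C, kept out of the build with #if 0; the 1000/60000 msec bounds
 * are assumed sample values, not read from a real association):
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stdio.h>

int
main(void)
{
	unsigned int rto = 0, minrto = 1000, maxrto = 60000;	/* msec, assumed */
	int i;

	for (i = 0; i < 8; i++) {
		if (rto == 0)
			rto = minrto;
		rto <<= 1;		/* double on every timeout */
		if (rto > maxrto)
			rto = maxrto;	/* clamp, as above */
		printf("timeout %d: RTO=%u\n", i + 1, rto);
	}
	return (0);
}
#endif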

static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked)
{

	/*
	 * Mark all chunks (well not all) that were sent to *net for
	 * retransmission. Move them to alt for their destination as well...
	 * We only mark chunks that have been outstanding long enough to
	 * have received feedback.
	 */
	struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rtt;
	int audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight, orig_tf;
	uint32_t tsnlast, tsnfirst;


	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
	cur_rtt *= 1000;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt,
		    stcb->asoc.peers_rwnd,
		    window_probe,
		    SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(net->flight_size,
		    SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
		    SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
		    SCTP_FR_CWND_REPORT);
		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
	}
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
	}
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf but we will fix that down below.
	 */
	orig_flight = net->flight_size;
	orig_tf = stcb->asoc.total_flight;

	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
		    chk->rec.data.TSN_seq,
		    MAX_TSN)) ||
		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
			/* Strange case our list got out of order? */
			SCTP_PRINTF("Our list is out of order?\n");
			panic("Out of order list");
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate it's been outstanding long enough */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
				sctp_log_fr(chk->rec.data.TSN_seq,
				    chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_MARK_TIME);
			}
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(0,
					    chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
				}
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
						sctp_log_fr(0,
						    chk->sent_rcv_time.tv_sec,
						    chk->sent_rcv_time.tv_usec,
						    SCTP_FR_T3_STOPPED);
					}
					continue;
				}
			}
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
				    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
					/* Yes so drop it */
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    &stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
					}
				}
				continue;
			}
			if (PR_SCTP_RTX_ENABLED(chk->flags)) {
				/* Has it been retransmitted tv_sec times? */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    &stcb->asoc.sent_queue, SCTP_SO_NOT_LOCKED);
					}
				}
				continue;
			}
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.TSN_seq;
				}
				tsnlast = chk->rec.data.TSN_seq;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
					    0, SCTP_FR_T3_MARKED);
				}
				if (chk->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					chk->whoTo->cwnd -= chk->book_size;
					chk->rec.data.chunk_was_revoked = 0;
				}
				net->marked_retrans++;
				stcb->asoc.marked_retrans++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
				stcb->asoc.peers_rwnd += chk->send_size;
				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			SCTP_STAT_INCR(sctps_markedretrans);

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			if (alt != net) {
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}
			}
			/*
			 * CMT: Do not allow FRs on retransmitted TSNs.
			 */
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1) {
				chk->no_fr_allowed = 1;
			}
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			could_be_sent = chk;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
		/* we did not subtract the same things? */
		audit_tf = 1;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
	}
#ifdef SCTP_DEBUG
	if (num_mk) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
		    num_mk, (u_long)stcb->asoc.peers_rwnd);
	}
#endif
	*num_marked = num_mk;
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	/* Now check for an ECN Echo that may be stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (audit_tf) {
		SCTPDBG(SCTP_DEBUG_TIMER4,
		    "Audit total flight due to negative value net:%p\n",
		    net);
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
			SCTPDBG(SCTP_DEBUG_TIMER4,
			    "Net:%p c-f cwnd:%d ssthresh:%d\n",
			    lnets, lnets->cwnd, lnets->ssthresh);
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/*
	 * Set up the ECN nonce re-sync point. We do this since
	 * retransmissions are NOT set up for ECN. This means that due to
	 * Karn's rule, we don't know the total of the peer's ECN bits.
	 */
	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (chk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;
	/* We return 1 if we only have a window probe outstanding */
	return (0);
}
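
/*
 * The PR-SCTP TTL check inside sctp_mark_all_for_resend() above is a plain
 * timeval comparison: a chunk is dropped once "now" passes
 * rec.data.timetodrop. A standalone sketch of that comparison (plain
 * userland C, kept out of the build with #if 0; the struct and the sample
 * times are assumptions for illustration):
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stdio.h>

struct tval {
	long sec, usec;
};

static int
ttl_expired(struct tval now, struct tval drop)
{
	/* same shape as the check above: seconds first, then microseconds */
	return (now.sec > drop.sec) ||
	    (now.sec == drop.sec && now.usec > drop.usec);
}

int
main(void)
{
	struct tval now = {100, 500000};
	struct tval drop_a = {100, 400000};	/* already expired */
	struct tval drop_b = {101, 0};		/* still live */

	printf("a: %d, b: %d\n", ttl_expired(now, drop_a),
	    ttl_expired(now, drop_b));
	return (0);
}
#endif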

static void
sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_queue_pending *sp;

	if (net == alt)
		/* nothing to do */
		return;

	asoc = &stcb->asoc;

	/*
	 * now go through all the streams checking for chunks sent to our
	 * bad network.
	 */
	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
		/* now clean up any chunks here */
		TAILQ_FOREACH(sp, &outs->outqueue, next) {
			if (sp->net == net) {
				sctp_free_remote_addr(sp->net);
				sp->net = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
	/* Now check the pending queue */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->whoTo == net) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}

}

int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination is not already
	 * in PF state, set the destination to PF state and store the
	 * current time as the time that the destination was last active. In
	 * addition, find an alternate destination with PF-based
	 * find_alt_net().
	 */
	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
		if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
			net->dest_state |= SCTP_ADDR_PF;
			net->last_active = sctp_get_tick_count();
			SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n",
			    net);
		}
		alt = sctp_find_alternate_net(stcb, net, 2);
	} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
		/*
		 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
		 * used, then pick dest with largest ssthresh for any
		 * retransmission.
		 */
		alt = net;
		alt = sctp_find_alternate_net(stcb, alt, 1);
		/*
		 * CUCv2: If a different dest is picked for the
		 * retransmission, then new (rtx-)pseudo_cumack needs to be
		 * tracked for orig dest. Let CUCv2 track new (rtx-)
		 * pseudo-cumack always.
		 */
		net->find_pseudo_cumack = 1;
		net->find_rtx_pseudo_cumack = 1;
	} else {			/* CMT is OFF */
		alt = sctp_find_alternate_net(stcb, net, 0);
	}

	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;

	/*
	 * setup the sat loss recovery that prevents satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send an immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;

				(void)SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
					/*
					 * no recent feedback in an RTO or
					 * more, request an RTT update
					 */
					if (sctp_send_hb(stcb, 1, net) < 0)
						return 1;
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the nets but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If SACKs are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for the rwnd to rise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/* Move all pending over too */
		sctp_move_all_chunks_to_alt(stcb, net, alt);

		/*
		 * Get the address that failed, to force a new src address
		 * selection and a route allocation.
		 */
		if (net->ro._s_addr) {
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
		}
		net->src_addr_selected = 0;

		/* Force a route allocation too */
		if (net->ro.ro_rt) {
			RTFREE(net->ro.ro_rt);
			net->ro.ro_rt = NULL;
		}
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate. Note:
			 * this means HB code must use this to re-set the
			 * primary if it goes active AND if someone does a
			 * change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (sctp_set_primary_addr(stcb,
			    (struct sockaddr *)NULL,
			    alt) == 0) {
				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
			}
		}
	} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf) && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
		/*
		 * JRS 5/14/07 - If the destination hasn't failed completely
		 * but is in PF state, a PF-heartbeat needs to be sent
		 * manually.
		 */
		if (sctp_send_hb(stcb, 1, net) < 0)
			return 1;
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output but must
	 * await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
		    stcb->asoc.last_acked_seq, MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
	return (0);
}
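
/*
 * A minimal sketch (plain userland C, kept out of the build with #if 0)
 * isolating the window-probe test at the top of the T3 handler above: a
 * timeout is treated as a window probe only when the peer advertises a
 * zero rwnd and less than one MTU is in flight. The numbers below are
 * assumed sample values.
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stdio.h>

int
main(void)
{
	unsigned int peers_rwnd = 0;		/* peer closed its window */
	unsigned int total_flight = 512;	/* assumed bytes in flight */
	unsigned int mtu = 1500;		/* assumed path MTU */
	int win_probe;

	/* same predicate as the T3 handler above */
	win_probe = ((peers_rwnd == 0) && (total_flight < mtu)) ? 1 : 0;
	printf("win_probe=%d\n", win_probe);
	return (0);
}
#endif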

int
sctp_t1init_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* bump the thresholds */
	if (stcb->asoc.delayed_connection) {
		/*
		 * special hook for delayed connection. The library did NOT
		 * complete the rest of its sends.
		 */
		stcb->asoc.delayed_connection = 0;
		sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	}
	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
		return (0);
	}
	if (sctp_threshold_management(inp, stcb, net,
	    stcb->asoc.max_init_times)) {
		/* Association was destroyed */
		return (1);
	}
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
	if (stcb->asoc.initial_init_rto_max < net->RTO) {
		net->RTO = stcb->asoc.initial_init_rto_max;
	}
	if (stcb->asoc.numnets > 1) {
		/* If we have more than one addr use it */
		struct sctp_nets *alt;

		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
		if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
			sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
			stcb->asoc.primary_destination = alt;
		}
	}
	/* Send out a new init */
	sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
	return (0);
}

/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4;
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared threshold management, now let's back off the address and
	 * select an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}

int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET, we use the seq number we sent out on */
	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared threshold management, now let's back off the address and
	 * select an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if an ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}

int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk, *nchk;

	/* is this a first send, or a retransmission? */
	if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
	} else {
		/*
		 * Retransmission of the existing ASCONF is needed
		 */

		/* find the existing ASCONF */
		asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
		if (asconf == NULL) {
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: our peer is not responding
			 * to ASCONFs but apparently is to other chunks.
			 * i.e. it is not properly handling the chunk type
			 * upper bits. Mark this peer as ASCONF incapable
			 * and cleanup.
			 */
			SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management, so now backoff the net and
		 * select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
		if (asconf->whoTo != alt) {
			sctp_free_remote_addr(asconf->whoTo);
			asconf->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		for (chk = asconf; chk; chk = nchk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			if (chk->whoTo != alt) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
			if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			chk->sent = SCTP_DATAGRAM_RESEND;
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to the alternate for ALL chunks in queue
			 */
			sctp_move_all_chunks_to_alt(stcb, net, alt);
			net = alt;
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;

		/* send another ASCONF if we have any and can do so */
		sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
	}
	return (0);
}

/* Mobility adaptation */
void
sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	if (stcb->asoc.deleted_primary == NULL) {
		SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
		sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
		return;
	}
	SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
	sctp_free_remote_addr(stcb->asoc.deleted_primary);
	stcb->asoc.deleted_primary = NULL;
	sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
	return;
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for our net */
	if (alt) {
		sctp_send_shutdown(stcb, alt);
	} else {
		/*
		 * if alt is NULL, there is no dest to send to??
		 */
		return (0);
	}
	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}

int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown-ack into the queue for our net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}

static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost, */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data queued, if so report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	int ret;

	if (net) {
		if (net->hb_responded == 0) {
			if (net->ro._s_addr) {
				/*
				 * Invalidate the src address if we did not
				 * get a response last time.
				 */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			}
			sctp_backoff_on_timeout(stcb, net, 1, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB, this will do threshold management, pick a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra hb's up to maxburst if there are
		 * any unconfirmed addresses.
		 */
		uint32_t cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (net->hb_responded == 0) {
					/* Did we respond last time? */
					if (net->ro._s_addr) {
						sctp_free_ifa(net->ro._s_addr);
						net->ro._s_addr = NULL;
						net->src_addr_selected = 0;
					}
				}
				ret = sctp_send_hb(stcb, 1, net);
				if (ret < 0)
					return 1;
				else if (ret == 0) {
					break;
				}
				if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
					break;
			}
		}
	}
	return (0);
}

int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		/* it's running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};


static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
	int i;

	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
		if (cur_mtu < mtu_sizes[i]) {
			/* no max_mtu is bigger than this one */
			return (mtu_sizes[i]);
		}
	}
	/* here return the highest allowable */
	return (cur_mtu);
}
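
/*
 * sctp_getnext_mtu() above is a simple "next value up" scan over the MTU
 * plateau table. A usage sketch (plain userland C, kept out of the build
 * with #if 0; the table below is an excerpt of the full table above):
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stdio.h>

static unsigned int table[] = {512, 576, 1006, 1492, 1500, 2002};
#define	TBLSZ	(sizeof(table) / sizeof(table[0]))

static unsigned int
next_mtu(unsigned int cur)
{
	unsigned int i;

	for (i = 0; i < TBLSZ; i++)
		if (cur < table[i])
			return (table[i]);	/* first size above cur */
	return (cur);				/* already at the top */
}

int
main(void)
{
	printf("%u -> %u\n", 1492u, next_mtu(1492));	/* 1500 */
	printf("%u -> %u\n", 2002u, next_mtu(2002));	/* stays 2002 */
	return (0);
}
#endif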


void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu, mtu;

	next_mtu = sctp_getnext_mtu(inp, net->mtu);

	if ((next_mtu > net->mtu) && (net->port == 0)) {
		if ((net->src_addr_selected == 0) ||
		    (net->ro._s_addr == NULL) ||
		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			} else if (net->ro._s_addr == NULL) {
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* KAME hack: embed scopeid */
					(void)sa6_embedscope(sin6, MODULE_GLOBAL(MOD_INET6, ip6_use_defzone));
				}
#endif

				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    (sctp_route_t *) & net->ro,
				    net, 0, stcb->asoc.vrf_id);
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					(void)sa6_recoverscope(sin6);
				}
#endif				/* INET6 */
			}
			if (net->ro._s_addr)
				net->src_addr_selected = 1;
		}
		if (net->ro._s_addr) {
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._l_addr.sa, net->ro.ro_rt);
			if (mtu > next_mtu) {
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	(void)SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose? */
		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to check
			 * later
			 */
			int tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}
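
/*
 * The autoclose decision above reduces to: ticks since the later of
 * last-send/last-receive, compared against the configured autoclose
 * interval. A minimal sketch (plain userland C, kept out of the build with
 * #if 0; SEC_TO_TICKS is assumed here to be hz-based with hz = 1000, and
 * the timestamps are invented samples):
 */
#if 0	/* illustrative sketch only -- not compiled */
#include <stdio.h>

#define	HZ		1000		/* assumed tick rate */
#define	SEC_TO_TICKS(s)	((s) * HZ)

int
main(void)
{
	long now = 5000, last_rcvd = 4990, last_sent = 4985;	/* seconds */
	long tim_touse = (last_rcvd > last_sent) ? last_rcvd : last_sent;
	int autoclose_ticks = SEC_TO_TICKS(8);	/* assumed setting */
	int gone_by = SEC_TO_TICKS(now - tim_touse);

	printf("%s\n", (gone_by > 0 && gone_by >= autoclose_ticks) ?
	    "close now" : "re-arm timer for the remainder");
	return (0);
}
#endif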

void
sctp_iterator_timer(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	/*
	 * only one iterator can run at a time. This is the only way we can
	 * cleanly pull ep's from underneath all the running iterators when
	 * an ep is freed.
	 */
	SCTP_ITERATOR_LOCK();
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_WLOCK();
		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
		/* stopping the callout is not needed, in theory */
		SCTP_INP_INFO_WUNLOCK();
		(void)SCTP_OS_TIMER_STOP(&it->tmr.timer);
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}
	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
	    (it->inp->inp_starting_point_for_iterator != it)) {
		SCTP_PRINTF("Iterator collision, waiting for one at %p\n",
		    it->inp);
		SCTP_INP_WUNLOCK(it->inp);
		goto start_timer_return;
	}
	/* mark the current iterator on the endpoint */
	it->inp->inp_starting_point_for_iterator = it;
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	SCTP_INP_RUNLOCK(it->inp);
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		goto no_stcb;
	}
	if ((it->stcb) &&
	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* mark the current iterator on the assoc */
		it->stcb->asoc.stcb_starting_point_for_iterator = it;
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
	start_timer_return:
			/* set a timer to continue this later */
			if (it->stcb)
				SCTP_TCB_UNLOCK(it->stcb);
			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
			    (struct sctp_inpcb *)it, NULL, NULL);
			SCTP_ITERATOR_UNLOCK();
			return;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	it->inp->inp_starting_point_for_iterator = NULL;
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}