Deleted Added
full compact
sctp_output.c (166675) sctp_output.c (167598)
1/*-
2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
32
33#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 166675 2007-02-12 23:24:31Z rrs $");
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_output.c 167598 2007-03-15 11:27:14Z rrs $");
35
36#include <netinet/sctp_os.h>
37#include <sys/proc.h>
38#include <netinet/sctp_var.h>
35
36#include <netinet/sctp_os.h>
37#include <sys/proc.h>
38#include <netinet/sctp_var.h>
39#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_header.h>
40#include <netinet/sctp_pcb.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp_output.h>
43#include <netinet/sctp_uio.h>
44#include <netinet/sctputil.h>
45#include <netinet/sctp_auth.h>
46#include <netinet/sctp_timer.h>
47#include <netinet/sctp_asconf.h>
48#include <netinet/sctp_indata.h>
49#include <netinet/sctp_bsd_addr.h>
50
40#include <netinet/sctp_header.h>
41#include <netinet/sctp_pcb.h>
42#include <netinet/sctputil.h>
43#include <netinet/sctp_output.h>
44#include <netinet/sctp_uio.h>
45#include <netinet/sctputil.h>
46#include <netinet/sctp_auth.h>
47#include <netinet/sctp_timer.h>
48#include <netinet/sctp_asconf.h>
49#include <netinet/sctp_indata.h>
50#include <netinet/sctp_bsd_addr.h>
51
51#ifdef SCTP_DEBUG
52extern uint32_t sctp_debug_on;
53
52
54#endif
55
53
56
57
58#define SCTP_MAX_GAPS_INARRAY 4
59struct sack_track {
60 uint8_t right_edge; /* mergable on the right edge */
61 uint8_t left_edge; /* mergable on the left edge */
62 uint8_t num_entries;
63 uint8_t spare;
64 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
65};
66
67struct sack_track sack_array[256] = {
68 {0, 0, 0, 0, /* 0x00 */
69 {{0, 0},
70 {0, 0},
71 {0, 0},
72 {0, 0}
73 }
74 },
75 {1, 0, 1, 0, /* 0x01 */
76 {{0, 0},
77 {0, 0},
78 {0, 0},
79 {0, 0}
80 }
81 },
82 {0, 0, 1, 0, /* 0x02 */
83 {{1, 1},
84 {0, 0},
85 {0, 0},
86 {0, 0}
87 }
88 },
89 {1, 0, 1, 0, /* 0x03 */
90 {{0, 1},
91 {0, 0},
92 {0, 0},
93 {0, 0}
94 }
95 },
96 {0, 0, 1, 0, /* 0x04 */
97 {{2, 2},
98 {0, 0},
99 {0, 0},
100 {0, 0}
101 }
102 },
103 {1, 0, 2, 0, /* 0x05 */
104 {{0, 0},
105 {2, 2},
106 {0, 0},
107 {0, 0}
108 }
109 },
110 {0, 0, 1, 0, /* 0x06 */
111 {{1, 2},
112 {0, 0},
113 {0, 0},
114 {0, 0}
115 }
116 },
117 {1, 0, 1, 0, /* 0x07 */
118 {{0, 2},
119 {0, 0},
120 {0, 0},
121 {0, 0}
122 }
123 },
124 {0, 0, 1, 0, /* 0x08 */
125 {{3, 3},
126 {0, 0},
127 {0, 0},
128 {0, 0}
129 }
130 },
131 {1, 0, 2, 0, /* 0x09 */
132 {{0, 0},
133 {3, 3},
134 {0, 0},
135 {0, 0}
136 }
137 },
138 {0, 0, 2, 0, /* 0x0a */
139 {{1, 1},
140 {3, 3},
141 {0, 0},
142 {0, 0}
143 }
144 },
145 {1, 0, 2, 0, /* 0x0b */
146 {{0, 1},
147 {3, 3},
148 {0, 0},
149 {0, 0}
150 }
151 },
152 {0, 0, 1, 0, /* 0x0c */
153 {{2, 3},
154 {0, 0},
155 {0, 0},
156 {0, 0}
157 }
158 },
159 {1, 0, 2, 0, /* 0x0d */
160 {{0, 0},
161 {2, 3},
162 {0, 0},
163 {0, 0}
164 }
165 },
166 {0, 0, 1, 0, /* 0x0e */
167 {{1, 3},
168 {0, 0},
169 {0, 0},
170 {0, 0}
171 }
172 },
173 {1, 0, 1, 0, /* 0x0f */
174 {{0, 3},
175 {0, 0},
176 {0, 0},
177 {0, 0}
178 }
179 },
180 {0, 0, 1, 0, /* 0x10 */
181 {{4, 4},
182 {0, 0},
183 {0, 0},
184 {0, 0}
185 }
186 },
187 {1, 0, 2, 0, /* 0x11 */
188 {{0, 0},
189 {4, 4},
190 {0, 0},
191 {0, 0}
192 }
193 },
194 {0, 0, 2, 0, /* 0x12 */
195 {{1, 1},
196 {4, 4},
197 {0, 0},
198 {0, 0}
199 }
200 },
201 {1, 0, 2, 0, /* 0x13 */
202 {{0, 1},
203 {4, 4},
204 {0, 0},
205 {0, 0}
206 }
207 },
208 {0, 0, 2, 0, /* 0x14 */
209 {{2, 2},
210 {4, 4},
211 {0, 0},
212 {0, 0}
213 }
214 },
215 {1, 0, 3, 0, /* 0x15 */
216 {{0, 0},
217 {2, 2},
218 {4, 4},
219 {0, 0}
220 }
221 },
222 {0, 0, 2, 0, /* 0x16 */
223 {{1, 2},
224 {4, 4},
225 {0, 0},
226 {0, 0}
227 }
228 },
229 {1, 0, 2, 0, /* 0x17 */
230 {{0, 2},
231 {4, 4},
232 {0, 0},
233 {0, 0}
234 }
235 },
236 {0, 0, 1, 0, /* 0x18 */
237 {{3, 4},
238 {0, 0},
239 {0, 0},
240 {0, 0}
241 }
242 },
243 {1, 0, 2, 0, /* 0x19 */
244 {{0, 0},
245 {3, 4},
246 {0, 0},
247 {0, 0}
248 }
249 },
250 {0, 0, 2, 0, /* 0x1a */
251 {{1, 1},
252 {3, 4},
253 {0, 0},
254 {0, 0}
255 }
256 },
257 {1, 0, 2, 0, /* 0x1b */
258 {{0, 1},
259 {3, 4},
260 {0, 0},
261 {0, 0}
262 }
263 },
264 {0, 0, 1, 0, /* 0x1c */
265 {{2, 4},
266 {0, 0},
267 {0, 0},
268 {0, 0}
269 }
270 },
271 {1, 0, 2, 0, /* 0x1d */
272 {{0, 0},
273 {2, 4},
274 {0, 0},
275 {0, 0}
276 }
277 },
278 {0, 0, 1, 0, /* 0x1e */
279 {{1, 4},
280 {0, 0},
281 {0, 0},
282 {0, 0}
283 }
284 },
285 {1, 0, 1, 0, /* 0x1f */
286 {{0, 4},
287 {0, 0},
288 {0, 0},
289 {0, 0}
290 }
291 },
292 {0, 0, 1, 0, /* 0x20 */
293 {{5, 5},
294 {0, 0},
295 {0, 0},
296 {0, 0}
297 }
298 },
299 {1, 0, 2, 0, /* 0x21 */
300 {{0, 0},
301 {5, 5},
302 {0, 0},
303 {0, 0}
304 }
305 },
306 {0, 0, 2, 0, /* 0x22 */
307 {{1, 1},
308 {5, 5},
309 {0, 0},
310 {0, 0}
311 }
312 },
313 {1, 0, 2, 0, /* 0x23 */
314 {{0, 1},
315 {5, 5},
316 {0, 0},
317 {0, 0}
318 }
319 },
320 {0, 0, 2, 0, /* 0x24 */
321 {{2, 2},
322 {5, 5},
323 {0, 0},
324 {0, 0}
325 }
326 },
327 {1, 0, 3, 0, /* 0x25 */
328 {{0, 0},
329 {2, 2},
330 {5, 5},
331 {0, 0}
332 }
333 },
334 {0, 0, 2, 0, /* 0x26 */
335 {{1, 2},
336 {5, 5},
337 {0, 0},
338 {0, 0}
339 }
340 },
341 {1, 0, 2, 0, /* 0x27 */
342 {{0, 2},
343 {5, 5},
344 {0, 0},
345 {0, 0}
346 }
347 },
348 {0, 0, 2, 0, /* 0x28 */
349 {{3, 3},
350 {5, 5},
351 {0, 0},
352 {0, 0}
353 }
354 },
355 {1, 0, 3, 0, /* 0x29 */
356 {{0, 0},
357 {3, 3},
358 {5, 5},
359 {0, 0}
360 }
361 },
362 {0, 0, 3, 0, /* 0x2a */
363 {{1, 1},
364 {3, 3},
365 {5, 5},
366 {0, 0}
367 }
368 },
369 {1, 0, 3, 0, /* 0x2b */
370 {{0, 1},
371 {3, 3},
372 {5, 5},
373 {0, 0}
374 }
375 },
376 {0, 0, 2, 0, /* 0x2c */
377 {{2, 3},
378 {5, 5},
379 {0, 0},
380 {0, 0}
381 }
382 },
383 {1, 0, 3, 0, /* 0x2d */
384 {{0, 0},
385 {2, 3},
386 {5, 5},
387 {0, 0}
388 }
389 },
390 {0, 0, 2, 0, /* 0x2e */
391 {{1, 3},
392 {5, 5},
393 {0, 0},
394 {0, 0}
395 }
396 },
397 {1, 0, 2, 0, /* 0x2f */
398 {{0, 3},
399 {5, 5},
400 {0, 0},
401 {0, 0}
402 }
403 },
404 {0, 0, 1, 0, /* 0x30 */
405 {{4, 5},
406 {0, 0},
407 {0, 0},
408 {0, 0}
409 }
410 },
411 {1, 0, 2, 0, /* 0x31 */
412 {{0, 0},
413 {4, 5},
414 {0, 0},
415 {0, 0}
416 }
417 },
418 {0, 0, 2, 0, /* 0x32 */
419 {{1, 1},
420 {4, 5},
421 {0, 0},
422 {0, 0}
423 }
424 },
425 {1, 0, 2, 0, /* 0x33 */
426 {{0, 1},
427 {4, 5},
428 {0, 0},
429 {0, 0}
430 }
431 },
432 {0, 0, 2, 0, /* 0x34 */
433 {{2, 2},
434 {4, 5},
435 {0, 0},
436 {0, 0}
437 }
438 },
439 {1, 0, 3, 0, /* 0x35 */
440 {{0, 0},
441 {2, 2},
442 {4, 5},
443 {0, 0}
444 }
445 },
446 {0, 0, 2, 0, /* 0x36 */
447 {{1, 2},
448 {4, 5},
449 {0, 0},
450 {0, 0}
451 }
452 },
453 {1, 0, 2, 0, /* 0x37 */
454 {{0, 2},
455 {4, 5},
456 {0, 0},
457 {0, 0}
458 }
459 },
460 {0, 0, 1, 0, /* 0x38 */
461 {{3, 5},
462 {0, 0},
463 {0, 0},
464 {0, 0}
465 }
466 },
467 {1, 0, 2, 0, /* 0x39 */
468 {{0, 0},
469 {3, 5},
470 {0, 0},
471 {0, 0}
472 }
473 },
474 {0, 0, 2, 0, /* 0x3a */
475 {{1, 1},
476 {3, 5},
477 {0, 0},
478 {0, 0}
479 }
480 },
481 {1, 0, 2, 0, /* 0x3b */
482 {{0, 1},
483 {3, 5},
484 {0, 0},
485 {0, 0}
486 }
487 },
488 {0, 0, 1, 0, /* 0x3c */
489 {{2, 5},
490 {0, 0},
491 {0, 0},
492 {0, 0}
493 }
494 },
495 {1, 0, 2, 0, /* 0x3d */
496 {{0, 0},
497 {2, 5},
498 {0, 0},
499 {0, 0}
500 }
501 },
502 {0, 0, 1, 0, /* 0x3e */
503 {{1, 5},
504 {0, 0},
505 {0, 0},
506 {0, 0}
507 }
508 },
509 {1, 0, 1, 0, /* 0x3f */
510 {{0, 5},
511 {0, 0},
512 {0, 0},
513 {0, 0}
514 }
515 },
516 {0, 0, 1, 0, /* 0x40 */
517 {{6, 6},
518 {0, 0},
519 {0, 0},
520 {0, 0}
521 }
522 },
523 {1, 0, 2, 0, /* 0x41 */
524 {{0, 0},
525 {6, 6},
526 {0, 0},
527 {0, 0}
528 }
529 },
530 {0, 0, 2, 0, /* 0x42 */
531 {{1, 1},
532 {6, 6},
533 {0, 0},
534 {0, 0}
535 }
536 },
537 {1, 0, 2, 0, /* 0x43 */
538 {{0, 1},
539 {6, 6},
540 {0, 0},
541 {0, 0}
542 }
543 },
544 {0, 0, 2, 0, /* 0x44 */
545 {{2, 2},
546 {6, 6},
547 {0, 0},
548 {0, 0}
549 }
550 },
551 {1, 0, 3, 0, /* 0x45 */
552 {{0, 0},
553 {2, 2},
554 {6, 6},
555 {0, 0}
556 }
557 },
558 {0, 0, 2, 0, /* 0x46 */
559 {{1, 2},
560 {6, 6},
561 {0, 0},
562 {0, 0}
563 }
564 },
565 {1, 0, 2, 0, /* 0x47 */
566 {{0, 2},
567 {6, 6},
568 {0, 0},
569 {0, 0}
570 }
571 },
572 {0, 0, 2, 0, /* 0x48 */
573 {{3, 3},
574 {6, 6},
575 {0, 0},
576 {0, 0}
577 }
578 },
579 {1, 0, 3, 0, /* 0x49 */
580 {{0, 0},
581 {3, 3},
582 {6, 6},
583 {0, 0}
584 }
585 },
586 {0, 0, 3, 0, /* 0x4a */
587 {{1, 1},
588 {3, 3},
589 {6, 6},
590 {0, 0}
591 }
592 },
593 {1, 0, 3, 0, /* 0x4b */
594 {{0, 1},
595 {3, 3},
596 {6, 6},
597 {0, 0}
598 }
599 },
600 {0, 0, 2, 0, /* 0x4c */
601 {{2, 3},
602 {6, 6},
603 {0, 0},
604 {0, 0}
605 }
606 },
607 {1, 0, 3, 0, /* 0x4d */
608 {{0, 0},
609 {2, 3},
610 {6, 6},
611 {0, 0}
612 }
613 },
614 {0, 0, 2, 0, /* 0x4e */
615 {{1, 3},
616 {6, 6},
617 {0, 0},
618 {0, 0}
619 }
620 },
621 {1, 0, 2, 0, /* 0x4f */
622 {{0, 3},
623 {6, 6},
624 {0, 0},
625 {0, 0}
626 }
627 },
628 {0, 0, 2, 0, /* 0x50 */
629 {{4, 4},
630 {6, 6},
631 {0, 0},
632 {0, 0}
633 }
634 },
635 {1, 0, 3, 0, /* 0x51 */
636 {{0, 0},
637 {4, 4},
638 {6, 6},
639 {0, 0}
640 }
641 },
642 {0, 0, 3, 0, /* 0x52 */
643 {{1, 1},
644 {4, 4},
645 {6, 6},
646 {0, 0}
647 }
648 },
649 {1, 0, 3, 0, /* 0x53 */
650 {{0, 1},
651 {4, 4},
652 {6, 6},
653 {0, 0}
654 }
655 },
656 {0, 0, 3, 0, /* 0x54 */
657 {{2, 2},
658 {4, 4},
659 {6, 6},
660 {0, 0}
661 }
662 },
663 {1, 0, 4, 0, /* 0x55 */
664 {{0, 0},
665 {2, 2},
666 {4, 4},
667 {6, 6}
668 }
669 },
670 {0, 0, 3, 0, /* 0x56 */
671 {{1, 2},
672 {4, 4},
673 {6, 6},
674 {0, 0}
675 }
676 },
677 {1, 0, 3, 0, /* 0x57 */
678 {{0, 2},
679 {4, 4},
680 {6, 6},
681 {0, 0}
682 }
683 },
684 {0, 0, 2, 0, /* 0x58 */
685 {{3, 4},
686 {6, 6},
687 {0, 0},
688 {0, 0}
689 }
690 },
691 {1, 0, 3, 0, /* 0x59 */
692 {{0, 0},
693 {3, 4},
694 {6, 6},
695 {0, 0}
696 }
697 },
698 {0, 0, 3, 0, /* 0x5a */
699 {{1, 1},
700 {3, 4},
701 {6, 6},
702 {0, 0}
703 }
704 },
705 {1, 0, 3, 0, /* 0x5b */
706 {{0, 1},
707 {3, 4},
708 {6, 6},
709 {0, 0}
710 }
711 },
712 {0, 0, 2, 0, /* 0x5c */
713 {{2, 4},
714 {6, 6},
715 {0, 0},
716 {0, 0}
717 }
718 },
719 {1, 0, 3, 0, /* 0x5d */
720 {{0, 0},
721 {2, 4},
722 {6, 6},
723 {0, 0}
724 }
725 },
726 {0, 0, 2, 0, /* 0x5e */
727 {{1, 4},
728 {6, 6},
729 {0, 0},
730 {0, 0}
731 }
732 },
733 {1, 0, 2, 0, /* 0x5f */
734 {{0, 4},
735 {6, 6},
736 {0, 0},
737 {0, 0}
738 }
739 },
740 {0, 0, 1, 0, /* 0x60 */
741 {{5, 6},
742 {0, 0},
743 {0, 0},
744 {0, 0}
745 }
746 },
747 {1, 0, 2, 0, /* 0x61 */
748 {{0, 0},
749 {5, 6},
750 {0, 0},
751 {0, 0}
752 }
753 },
754 {0, 0, 2, 0, /* 0x62 */
755 {{1, 1},
756 {5, 6},
757 {0, 0},
758 {0, 0}
759 }
760 },
761 {1, 0, 2, 0, /* 0x63 */
762 {{0, 1},
763 {5, 6},
764 {0, 0},
765 {0, 0}
766 }
767 },
768 {0, 0, 2, 0, /* 0x64 */
769 {{2, 2},
770 {5, 6},
771 {0, 0},
772 {0, 0}
773 }
774 },
775 {1, 0, 3, 0, /* 0x65 */
776 {{0, 0},
777 {2, 2},
778 {5, 6},
779 {0, 0}
780 }
781 },
782 {0, 0, 2, 0, /* 0x66 */
783 {{1, 2},
784 {5, 6},
785 {0, 0},
786 {0, 0}
787 }
788 },
789 {1, 0, 2, 0, /* 0x67 */
790 {{0, 2},
791 {5, 6},
792 {0, 0},
793 {0, 0}
794 }
795 },
796 {0, 0, 2, 0, /* 0x68 */
797 {{3, 3},
798 {5, 6},
799 {0, 0},
800 {0, 0}
801 }
802 },
803 {1, 0, 3, 0, /* 0x69 */
804 {{0, 0},
805 {3, 3},
806 {5, 6},
807 {0, 0}
808 }
809 },
810 {0, 0, 3, 0, /* 0x6a */
811 {{1, 1},
812 {3, 3},
813 {5, 6},
814 {0, 0}
815 }
816 },
817 {1, 0, 3, 0, /* 0x6b */
818 {{0, 1},
819 {3, 3},
820 {5, 6},
821 {0, 0}
822 }
823 },
824 {0, 0, 2, 0, /* 0x6c */
825 {{2, 3},
826 {5, 6},
827 {0, 0},
828 {0, 0}
829 }
830 },
831 {1, 0, 3, 0, /* 0x6d */
832 {{0, 0},
833 {2, 3},
834 {5, 6},
835 {0, 0}
836 }
837 },
838 {0, 0, 2, 0, /* 0x6e */
839 {{1, 3},
840 {5, 6},
841 {0, 0},
842 {0, 0}
843 }
844 },
845 {1, 0, 2, 0, /* 0x6f */
846 {{0, 3},
847 {5, 6},
848 {0, 0},
849 {0, 0}
850 }
851 },
852 {0, 0, 1, 0, /* 0x70 */
853 {{4, 6},
854 {0, 0},
855 {0, 0},
856 {0, 0}
857 }
858 },
859 {1, 0, 2, 0, /* 0x71 */
860 {{0, 0},
861 {4, 6},
862 {0, 0},
863 {0, 0}
864 }
865 },
866 {0, 0, 2, 0, /* 0x72 */
867 {{1, 1},
868 {4, 6},
869 {0, 0},
870 {0, 0}
871 }
872 },
873 {1, 0, 2, 0, /* 0x73 */
874 {{0, 1},
875 {4, 6},
876 {0, 0},
877 {0, 0}
878 }
879 },
880 {0, 0, 2, 0, /* 0x74 */
881 {{2, 2},
882 {4, 6},
883 {0, 0},
884 {0, 0}
885 }
886 },
887 {1, 0, 3, 0, /* 0x75 */
888 {{0, 0},
889 {2, 2},
890 {4, 6},
891 {0, 0}
892 }
893 },
894 {0, 0, 2, 0, /* 0x76 */
895 {{1, 2},
896 {4, 6},
897 {0, 0},
898 {0, 0}
899 }
900 },
901 {1, 0, 2, 0, /* 0x77 */
902 {{0, 2},
903 {4, 6},
904 {0, 0},
905 {0, 0}
906 }
907 },
908 {0, 0, 1, 0, /* 0x78 */
909 {{3, 6},
910 {0, 0},
911 {0, 0},
912 {0, 0}
913 }
914 },
915 {1, 0, 2, 0, /* 0x79 */
916 {{0, 0},
917 {3, 6},
918 {0, 0},
919 {0, 0}
920 }
921 },
922 {0, 0, 2, 0, /* 0x7a */
923 {{1, 1},
924 {3, 6},
925 {0, 0},
926 {0, 0}
927 }
928 },
929 {1, 0, 2, 0, /* 0x7b */
930 {{0, 1},
931 {3, 6},
932 {0, 0},
933 {0, 0}
934 }
935 },
936 {0, 0, 1, 0, /* 0x7c */
937 {{2, 6},
938 {0, 0},
939 {0, 0},
940 {0, 0}
941 }
942 },
943 {1, 0, 2, 0, /* 0x7d */
944 {{0, 0},
945 {2, 6},
946 {0, 0},
947 {0, 0}
948 }
949 },
950 {0, 0, 1, 0, /* 0x7e */
951 {{1, 6},
952 {0, 0},
953 {0, 0},
954 {0, 0}
955 }
956 },
957 {1, 0, 1, 0, /* 0x7f */
958 {{0, 6},
959 {0, 0},
960 {0, 0},
961 {0, 0}
962 }
963 },
964 {0, 1, 1, 0, /* 0x80 */
965 {{7, 7},
966 {0, 0},
967 {0, 0},
968 {0, 0}
969 }
970 },
971 {1, 1, 2, 0, /* 0x81 */
972 {{0, 0},
973 {7, 7},
974 {0, 0},
975 {0, 0}
976 }
977 },
978 {0, 1, 2, 0, /* 0x82 */
979 {{1, 1},
980 {7, 7},
981 {0, 0},
982 {0, 0}
983 }
984 },
985 {1, 1, 2, 0, /* 0x83 */
986 {{0, 1},
987 {7, 7},
988 {0, 0},
989 {0, 0}
990 }
991 },
992 {0, 1, 2, 0, /* 0x84 */
993 {{2, 2},
994 {7, 7},
995 {0, 0},
996 {0, 0}
997 }
998 },
999 {1, 1, 3, 0, /* 0x85 */
1000 {{0, 0},
1001 {2, 2},
1002 {7, 7},
1003 {0, 0}
1004 }
1005 },
1006 {0, 1, 2, 0, /* 0x86 */
1007 {{1, 2},
1008 {7, 7},
1009 {0, 0},
1010 {0, 0}
1011 }
1012 },
1013 {1, 1, 2, 0, /* 0x87 */
1014 {{0, 2},
1015 {7, 7},
1016 {0, 0},
1017 {0, 0}
1018 }
1019 },
1020 {0, 1, 2, 0, /* 0x88 */
1021 {{3, 3},
1022 {7, 7},
1023 {0, 0},
1024 {0, 0}
1025 }
1026 },
1027 {1, 1, 3, 0, /* 0x89 */
1028 {{0, 0},
1029 {3, 3},
1030 {7, 7},
1031 {0, 0}
1032 }
1033 },
1034 {0, 1, 3, 0, /* 0x8a */
1035 {{1, 1},
1036 {3, 3},
1037 {7, 7},
1038 {0, 0}
1039 }
1040 },
1041 {1, 1, 3, 0, /* 0x8b */
1042 {{0, 1},
1043 {3, 3},
1044 {7, 7},
1045 {0, 0}
1046 }
1047 },
1048 {0, 1, 2, 0, /* 0x8c */
1049 {{2, 3},
1050 {7, 7},
1051 {0, 0},
1052 {0, 0}
1053 }
1054 },
1055 {1, 1, 3, 0, /* 0x8d */
1056 {{0, 0},
1057 {2, 3},
1058 {7, 7},
1059 {0, 0}
1060 }
1061 },
1062 {0, 1, 2, 0, /* 0x8e */
1063 {{1, 3},
1064 {7, 7},
1065 {0, 0},
1066 {0, 0}
1067 }
1068 },
1069 {1, 1, 2, 0, /* 0x8f */
1070 {{0, 3},
1071 {7, 7},
1072 {0, 0},
1073 {0, 0}
1074 }
1075 },
1076 {0, 1, 2, 0, /* 0x90 */
1077 {{4, 4},
1078 {7, 7},
1079 {0, 0},
1080 {0, 0}
1081 }
1082 },
1083 {1, 1, 3, 0, /* 0x91 */
1084 {{0, 0},
1085 {4, 4},
1086 {7, 7},
1087 {0, 0}
1088 }
1089 },
1090 {0, 1, 3, 0, /* 0x92 */
1091 {{1, 1},
1092 {4, 4},
1093 {7, 7},
1094 {0, 0}
1095 }
1096 },
1097 {1, 1, 3, 0, /* 0x93 */
1098 {{0, 1},
1099 {4, 4},
1100 {7, 7},
1101 {0, 0}
1102 }
1103 },
1104 {0, 1, 3, 0, /* 0x94 */
1105 {{2, 2},
1106 {4, 4},
1107 {7, 7},
1108 {0, 0}
1109 }
1110 },
1111 {1, 1, 4, 0, /* 0x95 */
1112 {{0, 0},
1113 {2, 2},
1114 {4, 4},
1115 {7, 7}
1116 }
1117 },
1118 {0, 1, 3, 0, /* 0x96 */
1119 {{1, 2},
1120 {4, 4},
1121 {7, 7},
1122 {0, 0}
1123 }
1124 },
1125 {1, 1, 3, 0, /* 0x97 */
1126 {{0, 2},
1127 {4, 4},
1128 {7, 7},
1129 {0, 0}
1130 }
1131 },
1132 {0, 1, 2, 0, /* 0x98 */
1133 {{3, 4},
1134 {7, 7},
1135 {0, 0},
1136 {0, 0}
1137 }
1138 },
1139 {1, 1, 3, 0, /* 0x99 */
1140 {{0, 0},
1141 {3, 4},
1142 {7, 7},
1143 {0, 0}
1144 }
1145 },
1146 {0, 1, 3, 0, /* 0x9a */
1147 {{1, 1},
1148 {3, 4},
1149 {7, 7},
1150 {0, 0}
1151 }
1152 },
1153 {1, 1, 3, 0, /* 0x9b */
1154 {{0, 1},
1155 {3, 4},
1156 {7, 7},
1157 {0, 0}
1158 }
1159 },
1160 {0, 1, 2, 0, /* 0x9c */
1161 {{2, 4},
1162 {7, 7},
1163 {0, 0},
1164 {0, 0}
1165 }
1166 },
1167 {1, 1, 3, 0, /* 0x9d */
1168 {{0, 0},
1169 {2, 4},
1170 {7, 7},
1171 {0, 0}
1172 }
1173 },
1174 {0, 1, 2, 0, /* 0x9e */
1175 {{1, 4},
1176 {7, 7},
1177 {0, 0},
1178 {0, 0}
1179 }
1180 },
1181 {1, 1, 2, 0, /* 0x9f */
1182 {{0, 4},
1183 {7, 7},
1184 {0, 0},
1185 {0, 0}
1186 }
1187 },
1188 {0, 1, 2, 0, /* 0xa0 */
1189 {{5, 5},
1190 {7, 7},
1191 {0, 0},
1192 {0, 0}
1193 }
1194 },
1195 {1, 1, 3, 0, /* 0xa1 */
1196 {{0, 0},
1197 {5, 5},
1198 {7, 7},
1199 {0, 0}
1200 }
1201 },
1202 {0, 1, 3, 0, /* 0xa2 */
1203 {{1, 1},
1204 {5, 5},
1205 {7, 7},
1206 {0, 0}
1207 }
1208 },
1209 {1, 1, 3, 0, /* 0xa3 */
1210 {{0, 1},
1211 {5, 5},
1212 {7, 7},
1213 {0, 0}
1214 }
1215 },
1216 {0, 1, 3, 0, /* 0xa4 */
1217 {{2, 2},
1218 {5, 5},
1219 {7, 7},
1220 {0, 0}
1221 }
1222 },
1223 {1, 1, 4, 0, /* 0xa5 */
1224 {{0, 0},
1225 {2, 2},
1226 {5, 5},
1227 {7, 7}
1228 }
1229 },
1230 {0, 1, 3, 0, /* 0xa6 */
1231 {{1, 2},
1232 {5, 5},
1233 {7, 7},
1234 {0, 0}
1235 }
1236 },
1237 {1, 1, 3, 0, /* 0xa7 */
1238 {{0, 2},
1239 {5, 5},
1240 {7, 7},
1241 {0, 0}
1242 }
1243 },
1244 {0, 1, 3, 0, /* 0xa8 */
1245 {{3, 3},
1246 {5, 5},
1247 {7, 7},
1248 {0, 0}
1249 }
1250 },
1251 {1, 1, 4, 0, /* 0xa9 */
1252 {{0, 0},
1253 {3, 3},
1254 {5, 5},
1255 {7, 7}
1256 }
1257 },
1258 {0, 1, 4, 0, /* 0xaa */
1259 {{1, 1},
1260 {3, 3},
1261 {5, 5},
1262 {7, 7}
1263 }
1264 },
1265 {1, 1, 4, 0, /* 0xab */
1266 {{0, 1},
1267 {3, 3},
1268 {5, 5},
1269 {7, 7}
1270 }
1271 },
1272 {0, 1, 3, 0, /* 0xac */
1273 {{2, 3},
1274 {5, 5},
1275 {7, 7},
1276 {0, 0}
1277 }
1278 },
1279 {1, 1, 4, 0, /* 0xad */
1280 {{0, 0},
1281 {2, 3},
1282 {5, 5},
1283 {7, 7}
1284 }
1285 },
1286 {0, 1, 3, 0, /* 0xae */
1287 {{1, 3},
1288 {5, 5},
1289 {7, 7},
1290 {0, 0}
1291 }
1292 },
1293 {1, 1, 3, 0, /* 0xaf */
1294 {{0, 3},
1295 {5, 5},
1296 {7, 7},
1297 {0, 0}
1298 }
1299 },
1300 {0, 1, 2, 0, /* 0xb0 */
1301 {{4, 5},
1302 {7, 7},
1303 {0, 0},
1304 {0, 0}
1305 }
1306 },
1307 {1, 1, 3, 0, /* 0xb1 */
1308 {{0, 0},
1309 {4, 5},
1310 {7, 7},
1311 {0, 0}
1312 }
1313 },
1314 {0, 1, 3, 0, /* 0xb2 */
1315 {{1, 1},
1316 {4, 5},
1317 {7, 7},
1318 {0, 0}
1319 }
1320 },
1321 {1, 1, 3, 0, /* 0xb3 */
1322 {{0, 1},
1323 {4, 5},
1324 {7, 7},
1325 {0, 0}
1326 }
1327 },
1328 {0, 1, 3, 0, /* 0xb4 */
1329 {{2, 2},
1330 {4, 5},
1331 {7, 7},
1332 {0, 0}
1333 }
1334 },
1335 {1, 1, 4, 0, /* 0xb5 */
1336 {{0, 0},
1337 {2, 2},
1338 {4, 5},
1339 {7, 7}
1340 }
1341 },
1342 {0, 1, 3, 0, /* 0xb6 */
1343 {{1, 2},
1344 {4, 5},
1345 {7, 7},
1346 {0, 0}
1347 }
1348 },
1349 {1, 1, 3, 0, /* 0xb7 */
1350 {{0, 2},
1351 {4, 5},
1352 {7, 7},
1353 {0, 0}
1354 }
1355 },
1356 {0, 1, 2, 0, /* 0xb8 */
1357 {{3, 5},
1358 {7, 7},
1359 {0, 0},
1360 {0, 0}
1361 }
1362 },
1363 {1, 1, 3, 0, /* 0xb9 */
1364 {{0, 0},
1365 {3, 5},
1366 {7, 7},
1367 {0, 0}
1368 }
1369 },
1370 {0, 1, 3, 0, /* 0xba */
1371 {{1, 1},
1372 {3, 5},
1373 {7, 7},
1374 {0, 0}
1375 }
1376 },
1377 {1, 1, 3, 0, /* 0xbb */
1378 {{0, 1},
1379 {3, 5},
1380 {7, 7},
1381 {0, 0}
1382 }
1383 },
1384 {0, 1, 2, 0, /* 0xbc */
1385 {{2, 5},
1386 {7, 7},
1387 {0, 0},
1388 {0, 0}
1389 }
1390 },
1391 {1, 1, 3, 0, /* 0xbd */
1392 {{0, 0},
1393 {2, 5},
1394 {7, 7},
1395 {0, 0}
1396 }
1397 },
1398 {0, 1, 2, 0, /* 0xbe */
1399 {{1, 5},
1400 {7, 7},
1401 {0, 0},
1402 {0, 0}
1403 }
1404 },
1405 {1, 1, 2, 0, /* 0xbf */
1406 {{0, 5},
1407 {7, 7},
1408 {0, 0},
1409 {0, 0}
1410 }
1411 },
1412 {0, 1, 1, 0, /* 0xc0 */
1413 {{6, 7},
1414 {0, 0},
1415 {0, 0},
1416 {0, 0}
1417 }
1418 },
1419 {1, 1, 2, 0, /* 0xc1 */
1420 {{0, 0},
1421 {6, 7},
1422 {0, 0},
1423 {0, 0}
1424 }
1425 },
1426 {0, 1, 2, 0, /* 0xc2 */
1427 {{1, 1},
1428 {6, 7},
1429 {0, 0},
1430 {0, 0}
1431 }
1432 },
1433 {1, 1, 2, 0, /* 0xc3 */
1434 {{0, 1},
1435 {6, 7},
1436 {0, 0},
1437 {0, 0}
1438 }
1439 },
1440 {0, 1, 2, 0, /* 0xc4 */
1441 {{2, 2},
1442 {6, 7},
1443 {0, 0},
1444 {0, 0}
1445 }
1446 },
1447 {1, 1, 3, 0, /* 0xc5 */
1448 {{0, 0},
1449 {2, 2},
1450 {6, 7},
1451 {0, 0}
1452 }
1453 },
1454 {0, 1, 2, 0, /* 0xc6 */
1455 {{1, 2},
1456 {6, 7},
1457 {0, 0},
1458 {0, 0}
1459 }
1460 },
1461 {1, 1, 2, 0, /* 0xc7 */
1462 {{0, 2},
1463 {6, 7},
1464 {0, 0},
1465 {0, 0}
1466 }
1467 },
1468 {0, 1, 2, 0, /* 0xc8 */
1469 {{3, 3},
1470 {6, 7},
1471 {0, 0},
1472 {0, 0}
1473 }
1474 },
1475 {1, 1, 3, 0, /* 0xc9 */
1476 {{0, 0},
1477 {3, 3},
1478 {6, 7},
1479 {0, 0}
1480 }
1481 },
1482 {0, 1, 3, 0, /* 0xca */
1483 {{1, 1},
1484 {3, 3},
1485 {6, 7},
1486 {0, 0}
1487 }
1488 },
1489 {1, 1, 3, 0, /* 0xcb */
1490 {{0, 1},
1491 {3, 3},
1492 {6, 7},
1493 {0, 0}
1494 }
1495 },
1496 {0, 1, 2, 0, /* 0xcc */
1497 {{2, 3},
1498 {6, 7},
1499 {0, 0},
1500 {0, 0}
1501 }
1502 },
1503 {1, 1, 3, 0, /* 0xcd */
1504 {{0, 0},
1505 {2, 3},
1506 {6, 7},
1507 {0, 0}
1508 }
1509 },
1510 {0, 1, 2, 0, /* 0xce */
1511 {{1, 3},
1512 {6, 7},
1513 {0, 0},
1514 {0, 0}
1515 }
1516 },
1517 {1, 1, 2, 0, /* 0xcf */
1518 {{0, 3},
1519 {6, 7},
1520 {0, 0},
1521 {0, 0}
1522 }
1523 },
1524 {0, 1, 2, 0, /* 0xd0 */
1525 {{4, 4},
1526 {6, 7},
1527 {0, 0},
1528 {0, 0}
1529 }
1530 },
1531 {1, 1, 3, 0, /* 0xd1 */
1532 {{0, 0},
1533 {4, 4},
1534 {6, 7},
1535 {0, 0}
1536 }
1537 },
1538 {0, 1, 3, 0, /* 0xd2 */
1539 {{1, 1},
1540 {4, 4},
1541 {6, 7},
1542 {0, 0}
1543 }
1544 },
1545 {1, 1, 3, 0, /* 0xd3 */
1546 {{0, 1},
1547 {4, 4},
1548 {6, 7},
1549 {0, 0}
1550 }
1551 },
1552 {0, 1, 3, 0, /* 0xd4 */
1553 {{2, 2},
1554 {4, 4},
1555 {6, 7},
1556 {0, 0}
1557 }
1558 },
1559 {1, 1, 4, 0, /* 0xd5 */
1560 {{0, 0},
1561 {2, 2},
1562 {4, 4},
1563 {6, 7}
1564 }
1565 },
1566 {0, 1, 3, 0, /* 0xd6 */
1567 {{1, 2},
1568 {4, 4},
1569 {6, 7},
1570 {0, 0}
1571 }
1572 },
1573 {1, 1, 3, 0, /* 0xd7 */
1574 {{0, 2},
1575 {4, 4},
1576 {6, 7},
1577 {0, 0}
1578 }
1579 },
1580 {0, 1, 2, 0, /* 0xd8 */
1581 {{3, 4},
1582 {6, 7},
1583 {0, 0},
1584 {0, 0}
1585 }
1586 },
1587 {1, 1, 3, 0, /* 0xd9 */
1588 {{0, 0},
1589 {3, 4},
1590 {6, 7},
1591 {0, 0}
1592 }
1593 },
1594 {0, 1, 3, 0, /* 0xda */
1595 {{1, 1},
1596 {3, 4},
1597 {6, 7},
1598 {0, 0}
1599 }
1600 },
1601 {1, 1, 3, 0, /* 0xdb */
1602 {{0, 1},
1603 {3, 4},
1604 {6, 7},
1605 {0, 0}
1606 }
1607 },
1608 {0, 1, 2, 0, /* 0xdc */
1609 {{2, 4},
1610 {6, 7},
1611 {0, 0},
1612 {0, 0}
1613 }
1614 },
1615 {1, 1, 3, 0, /* 0xdd */
1616 {{0, 0},
1617 {2, 4},
1618 {6, 7},
1619 {0, 0}
1620 }
1621 },
1622 {0, 1, 2, 0, /* 0xde */
1623 {{1, 4},
1624 {6, 7},
1625 {0, 0},
1626 {0, 0}
1627 }
1628 },
1629 {1, 1, 2, 0, /* 0xdf */
1630 {{0, 4},
1631 {6, 7},
1632 {0, 0},
1633 {0, 0}
1634 }
1635 },
1636 {0, 1, 1, 0, /* 0xe0 */
1637 {{5, 7},
1638 {0, 0},
1639 {0, 0},
1640 {0, 0}
1641 }
1642 },
1643 {1, 1, 2, 0, /* 0xe1 */
1644 {{0, 0},
1645 {5, 7},
1646 {0, 0},
1647 {0, 0}
1648 }
1649 },
1650 {0, 1, 2, 0, /* 0xe2 */
1651 {{1, 1},
1652 {5, 7},
1653 {0, 0},
1654 {0, 0}
1655 }
1656 },
1657 {1, 1, 2, 0, /* 0xe3 */
1658 {{0, 1},
1659 {5, 7},
1660 {0, 0},
1661 {0, 0}
1662 }
1663 },
1664 {0, 1, 2, 0, /* 0xe4 */
1665 {{2, 2},
1666 {5, 7},
1667 {0, 0},
1668 {0, 0}
1669 }
1670 },
1671 {1, 1, 3, 0, /* 0xe5 */
1672 {{0, 0},
1673 {2, 2},
1674 {5, 7},
1675 {0, 0}
1676 }
1677 },
1678 {0, 1, 2, 0, /* 0xe6 */
1679 {{1, 2},
1680 {5, 7},
1681 {0, 0},
1682 {0, 0}
1683 }
1684 },
1685 {1, 1, 2, 0, /* 0xe7 */
1686 {{0, 2},
1687 {5, 7},
1688 {0, 0},
1689 {0, 0}
1690 }
1691 },
1692 {0, 1, 2, 0, /* 0xe8 */
1693 {{3, 3},
1694 {5, 7},
1695 {0, 0},
1696 {0, 0}
1697 }
1698 },
1699 {1, 1, 3, 0, /* 0xe9 */
1700 {{0, 0},
1701 {3, 3},
1702 {5, 7},
1703 {0, 0}
1704 }
1705 },
1706 {0, 1, 3, 0, /* 0xea */
1707 {{1, 1},
1708 {3, 3},
1709 {5, 7},
1710 {0, 0}
1711 }
1712 },
1713 {1, 1, 3, 0, /* 0xeb */
1714 {{0, 1},
1715 {3, 3},
1716 {5, 7},
1717 {0, 0}
1718 }
1719 },
1720 {0, 1, 2, 0, /* 0xec */
1721 {{2, 3},
1722 {5, 7},
1723 {0, 0},
1724 {0, 0}
1725 }
1726 },
1727 {1, 1, 3, 0, /* 0xed */
1728 {{0, 0},
1729 {2, 3},
1730 {5, 7},
1731 {0, 0}
1732 }
1733 },
1734 {0, 1, 2, 0, /* 0xee */
1735 {{1, 3},
1736 {5, 7},
1737 {0, 0},
1738 {0, 0}
1739 }
1740 },
1741 {1, 1, 2, 0, /* 0xef */
1742 {{0, 3},
1743 {5, 7},
1744 {0, 0},
1745 {0, 0}
1746 }
1747 },
1748 {0, 1, 1, 0, /* 0xf0 */
1749 {{4, 7},
1750 {0, 0},
1751 {0, 0},
1752 {0, 0}
1753 }
1754 },
1755 {1, 1, 2, 0, /* 0xf1 */
1756 {{0, 0},
1757 {4, 7},
1758 {0, 0},
1759 {0, 0}
1760 }
1761 },
1762 {0, 1, 2, 0, /* 0xf2 */
1763 {{1, 1},
1764 {4, 7},
1765 {0, 0},
1766 {0, 0}
1767 }
1768 },
1769 {1, 1, 2, 0, /* 0xf3 */
1770 {{0, 1},
1771 {4, 7},
1772 {0, 0},
1773 {0, 0}
1774 }
1775 },
1776 {0, 1, 2, 0, /* 0xf4 */
1777 {{2, 2},
1778 {4, 7},
1779 {0, 0},
1780 {0, 0}
1781 }
1782 },
1783 {1, 1, 3, 0, /* 0xf5 */
1784 {{0, 0},
1785 {2, 2},
1786 {4, 7},
1787 {0, 0}
1788 }
1789 },
1790 {0, 1, 2, 0, /* 0xf6 */
1791 {{1, 2},
1792 {4, 7},
1793 {0, 0},
1794 {0, 0}
1795 }
1796 },
1797 {1, 1, 2, 0, /* 0xf7 */
1798 {{0, 2},
1799 {4, 7},
1800 {0, 0},
1801 {0, 0}
1802 }
1803 },
1804 {0, 1, 1, 0, /* 0xf8 */
1805 {{3, 7},
1806 {0, 0},
1807 {0, 0},
1808 {0, 0}
1809 }
1810 },
1811 {1, 1, 2, 0, /* 0xf9 */
1812 {{0, 0},
1813 {3, 7},
1814 {0, 0},
1815 {0, 0}
1816 }
1817 },
1818 {0, 1, 2, 0, /* 0xfa */
1819 {{1, 1},
1820 {3, 7},
1821 {0, 0},
1822 {0, 0}
1823 }
1824 },
1825 {1, 1, 2, 0, /* 0xfb */
1826 {{0, 1},
1827 {3, 7},
1828 {0, 0},
1829 {0, 0}
1830 }
1831 },
1832 {0, 1, 1, 0, /* 0xfc */
1833 {{2, 7},
1834 {0, 0},
1835 {0, 0},
1836 {0, 0}
1837 }
1838 },
1839 {1, 1, 2, 0, /* 0xfd */
1840 {{0, 0},
1841 {2, 7},
1842 {0, 0},
1843 {0, 0}
1844 }
1845 },
1846 {0, 1, 1, 0, /* 0xfe */
1847 {{1, 7},
1848 {0, 0},
1849 {0, 0},
1850 {0, 0}
1851 }
1852 },
1853 {1, 1, 1, 0, /* 0xff */
1854 {{0, 7},
1855 {0, 0},
1856 {0, 0},
1857 {0, 0}
1858 }
1859 }
1860};
1861
1862
54#define SCTP_MAX_GAPS_INARRAY 4
55struct sack_track {
56 uint8_t right_edge; /* mergable on the right edge */
57 uint8_t left_edge; /* mergable on the left edge */
58 uint8_t num_entries;
59 uint8_t spare;
60 struct sctp_gap_ack_block gaps[SCTP_MAX_GAPS_INARRAY];
61};
62
63struct sack_track sack_array[256] = {
64 {0, 0, 0, 0, /* 0x00 */
65 {{0, 0},
66 {0, 0},
67 {0, 0},
68 {0, 0}
69 }
70 },
71 {1, 0, 1, 0, /* 0x01 */
72 {{0, 0},
73 {0, 0},
74 {0, 0},
75 {0, 0}
76 }
77 },
78 {0, 0, 1, 0, /* 0x02 */
79 {{1, 1},
80 {0, 0},
81 {0, 0},
82 {0, 0}
83 }
84 },
85 {1, 0, 1, 0, /* 0x03 */
86 {{0, 1},
87 {0, 0},
88 {0, 0},
89 {0, 0}
90 }
91 },
92 {0, 0, 1, 0, /* 0x04 */
93 {{2, 2},
94 {0, 0},
95 {0, 0},
96 {0, 0}
97 }
98 },
99 {1, 0, 2, 0, /* 0x05 */
100 {{0, 0},
101 {2, 2},
102 {0, 0},
103 {0, 0}
104 }
105 },
106 {0, 0, 1, 0, /* 0x06 */
107 {{1, 2},
108 {0, 0},
109 {0, 0},
110 {0, 0}
111 }
112 },
113 {1, 0, 1, 0, /* 0x07 */
114 {{0, 2},
115 {0, 0},
116 {0, 0},
117 {0, 0}
118 }
119 },
120 {0, 0, 1, 0, /* 0x08 */
121 {{3, 3},
122 {0, 0},
123 {0, 0},
124 {0, 0}
125 }
126 },
127 {1, 0, 2, 0, /* 0x09 */
128 {{0, 0},
129 {3, 3},
130 {0, 0},
131 {0, 0}
132 }
133 },
134 {0, 0, 2, 0, /* 0x0a */
135 {{1, 1},
136 {3, 3},
137 {0, 0},
138 {0, 0}
139 }
140 },
141 {1, 0, 2, 0, /* 0x0b */
142 {{0, 1},
143 {3, 3},
144 {0, 0},
145 {0, 0}
146 }
147 },
148 {0, 0, 1, 0, /* 0x0c */
149 {{2, 3},
150 {0, 0},
151 {0, 0},
152 {0, 0}
153 }
154 },
155 {1, 0, 2, 0, /* 0x0d */
156 {{0, 0},
157 {2, 3},
158 {0, 0},
159 {0, 0}
160 }
161 },
162 {0, 0, 1, 0, /* 0x0e */
163 {{1, 3},
164 {0, 0},
165 {0, 0},
166 {0, 0}
167 }
168 },
169 {1, 0, 1, 0, /* 0x0f */
170 {{0, 3},
171 {0, 0},
172 {0, 0},
173 {0, 0}
174 }
175 },
176 {0, 0, 1, 0, /* 0x10 */
177 {{4, 4},
178 {0, 0},
179 {0, 0},
180 {0, 0}
181 }
182 },
183 {1, 0, 2, 0, /* 0x11 */
184 {{0, 0},
185 {4, 4},
186 {0, 0},
187 {0, 0}
188 }
189 },
190 {0, 0, 2, 0, /* 0x12 */
191 {{1, 1},
192 {4, 4},
193 {0, 0},
194 {0, 0}
195 }
196 },
197 {1, 0, 2, 0, /* 0x13 */
198 {{0, 1},
199 {4, 4},
200 {0, 0},
201 {0, 0}
202 }
203 },
204 {0, 0, 2, 0, /* 0x14 */
205 {{2, 2},
206 {4, 4},
207 {0, 0},
208 {0, 0}
209 }
210 },
211 {1, 0, 3, 0, /* 0x15 */
212 {{0, 0},
213 {2, 2},
214 {4, 4},
215 {0, 0}
216 }
217 },
218 {0, 0, 2, 0, /* 0x16 */
219 {{1, 2},
220 {4, 4},
221 {0, 0},
222 {0, 0}
223 }
224 },
225 {1, 0, 2, 0, /* 0x17 */
226 {{0, 2},
227 {4, 4},
228 {0, 0},
229 {0, 0}
230 }
231 },
232 {0, 0, 1, 0, /* 0x18 */
233 {{3, 4},
234 {0, 0},
235 {0, 0},
236 {0, 0}
237 }
238 },
239 {1, 0, 2, 0, /* 0x19 */
240 {{0, 0},
241 {3, 4},
242 {0, 0},
243 {0, 0}
244 }
245 },
246 {0, 0, 2, 0, /* 0x1a */
247 {{1, 1},
248 {3, 4},
249 {0, 0},
250 {0, 0}
251 }
252 },
253 {1, 0, 2, 0, /* 0x1b */
254 {{0, 1},
255 {3, 4},
256 {0, 0},
257 {0, 0}
258 }
259 },
260 {0, 0, 1, 0, /* 0x1c */
261 {{2, 4},
262 {0, 0},
263 {0, 0},
264 {0, 0}
265 }
266 },
267 {1, 0, 2, 0, /* 0x1d */
268 {{0, 0},
269 {2, 4},
270 {0, 0},
271 {0, 0}
272 }
273 },
274 {0, 0, 1, 0, /* 0x1e */
275 {{1, 4},
276 {0, 0},
277 {0, 0},
278 {0, 0}
279 }
280 },
281 {1, 0, 1, 0, /* 0x1f */
282 {{0, 4},
283 {0, 0},
284 {0, 0},
285 {0, 0}
286 }
287 },
288 {0, 0, 1, 0, /* 0x20 */
289 {{5, 5},
290 {0, 0},
291 {0, 0},
292 {0, 0}
293 }
294 },
295 {1, 0, 2, 0, /* 0x21 */
296 {{0, 0},
297 {5, 5},
298 {0, 0},
299 {0, 0}
300 }
301 },
302 {0, 0, 2, 0, /* 0x22 */
303 {{1, 1},
304 {5, 5},
305 {0, 0},
306 {0, 0}
307 }
308 },
309 {1, 0, 2, 0, /* 0x23 */
310 {{0, 1},
311 {5, 5},
312 {0, 0},
313 {0, 0}
314 }
315 },
316 {0, 0, 2, 0, /* 0x24 */
317 {{2, 2},
318 {5, 5},
319 {0, 0},
320 {0, 0}
321 }
322 },
323 {1, 0, 3, 0, /* 0x25 */
324 {{0, 0},
325 {2, 2},
326 {5, 5},
327 {0, 0}
328 }
329 },
330 {0, 0, 2, 0, /* 0x26 */
331 {{1, 2},
332 {5, 5},
333 {0, 0},
334 {0, 0}
335 }
336 },
337 {1, 0, 2, 0, /* 0x27 */
338 {{0, 2},
339 {5, 5},
340 {0, 0},
341 {0, 0}
342 }
343 },
344 {0, 0, 2, 0, /* 0x28 */
345 {{3, 3},
346 {5, 5},
347 {0, 0},
348 {0, 0}
349 }
350 },
351 {1, 0, 3, 0, /* 0x29 */
352 {{0, 0},
353 {3, 3},
354 {5, 5},
355 {0, 0}
356 }
357 },
358 {0, 0, 3, 0, /* 0x2a */
359 {{1, 1},
360 {3, 3},
361 {5, 5},
362 {0, 0}
363 }
364 },
365 {1, 0, 3, 0, /* 0x2b */
366 {{0, 1},
367 {3, 3},
368 {5, 5},
369 {0, 0}
370 }
371 },
372 {0, 0, 2, 0, /* 0x2c */
373 {{2, 3},
374 {5, 5},
375 {0, 0},
376 {0, 0}
377 }
378 },
379 {1, 0, 3, 0, /* 0x2d */
380 {{0, 0},
381 {2, 3},
382 {5, 5},
383 {0, 0}
384 }
385 },
386 {0, 0, 2, 0, /* 0x2e */
387 {{1, 3},
388 {5, 5},
389 {0, 0},
390 {0, 0}
391 }
392 },
393 {1, 0, 2, 0, /* 0x2f */
394 {{0, 3},
395 {5, 5},
396 {0, 0},
397 {0, 0}
398 }
399 },
400 {0, 0, 1, 0, /* 0x30 */
401 {{4, 5},
402 {0, 0},
403 {0, 0},
404 {0, 0}
405 }
406 },
407 {1, 0, 2, 0, /* 0x31 */
408 {{0, 0},
409 {4, 5},
410 {0, 0},
411 {0, 0}
412 }
413 },
414 {0, 0, 2, 0, /* 0x32 */
415 {{1, 1},
416 {4, 5},
417 {0, 0},
418 {0, 0}
419 }
420 },
421 {1, 0, 2, 0, /* 0x33 */
422 {{0, 1},
423 {4, 5},
424 {0, 0},
425 {0, 0}
426 }
427 },
428 {0, 0, 2, 0, /* 0x34 */
429 {{2, 2},
430 {4, 5},
431 {0, 0},
432 {0, 0}
433 }
434 },
435 {1, 0, 3, 0, /* 0x35 */
436 {{0, 0},
437 {2, 2},
438 {4, 5},
439 {0, 0}
440 }
441 },
442 {0, 0, 2, 0, /* 0x36 */
443 {{1, 2},
444 {4, 5},
445 {0, 0},
446 {0, 0}
447 }
448 },
449 {1, 0, 2, 0, /* 0x37 */
450 {{0, 2},
451 {4, 5},
452 {0, 0},
453 {0, 0}
454 }
455 },
456 {0, 0, 1, 0, /* 0x38 */
457 {{3, 5},
458 {0, 0},
459 {0, 0},
460 {0, 0}
461 }
462 },
463 {1, 0, 2, 0, /* 0x39 */
464 {{0, 0},
465 {3, 5},
466 {0, 0},
467 {0, 0}
468 }
469 },
470 {0, 0, 2, 0, /* 0x3a */
471 {{1, 1},
472 {3, 5},
473 {0, 0},
474 {0, 0}
475 }
476 },
477 {1, 0, 2, 0, /* 0x3b */
478 {{0, 1},
479 {3, 5},
480 {0, 0},
481 {0, 0}
482 }
483 },
484 {0, 0, 1, 0, /* 0x3c */
485 {{2, 5},
486 {0, 0},
487 {0, 0},
488 {0, 0}
489 }
490 },
491 {1, 0, 2, 0, /* 0x3d */
492 {{0, 0},
493 {2, 5},
494 {0, 0},
495 {0, 0}
496 }
497 },
498 {0, 0, 1, 0, /* 0x3e */
499 {{1, 5},
500 {0, 0},
501 {0, 0},
502 {0, 0}
503 }
504 },
505 {1, 0, 1, 0, /* 0x3f */
506 {{0, 5},
507 {0, 0},
508 {0, 0},
509 {0, 0}
510 }
511 },
512 {0, 0, 1, 0, /* 0x40 */
513 {{6, 6},
514 {0, 0},
515 {0, 0},
516 {0, 0}
517 }
518 },
519 {1, 0, 2, 0, /* 0x41 */
520 {{0, 0},
521 {6, 6},
522 {0, 0},
523 {0, 0}
524 }
525 },
526 {0, 0, 2, 0, /* 0x42 */
527 {{1, 1},
528 {6, 6},
529 {0, 0},
530 {0, 0}
531 }
532 },
533 {1, 0, 2, 0, /* 0x43 */
534 {{0, 1},
535 {6, 6},
536 {0, 0},
537 {0, 0}
538 }
539 },
540 {0, 0, 2, 0, /* 0x44 */
541 {{2, 2},
542 {6, 6},
543 {0, 0},
544 {0, 0}
545 }
546 },
547 {1, 0, 3, 0, /* 0x45 */
548 {{0, 0},
549 {2, 2},
550 {6, 6},
551 {0, 0}
552 }
553 },
554 {0, 0, 2, 0, /* 0x46 */
555 {{1, 2},
556 {6, 6},
557 {0, 0},
558 {0, 0}
559 }
560 },
561 {1, 0, 2, 0, /* 0x47 */
562 {{0, 2},
563 {6, 6},
564 {0, 0},
565 {0, 0}
566 }
567 },
568 {0, 0, 2, 0, /* 0x48 */
569 {{3, 3},
570 {6, 6},
571 {0, 0},
572 {0, 0}
573 }
574 },
575 {1, 0, 3, 0, /* 0x49 */
576 {{0, 0},
577 {3, 3},
578 {6, 6},
579 {0, 0}
580 }
581 },
582 {0, 0, 3, 0, /* 0x4a */
583 {{1, 1},
584 {3, 3},
585 {6, 6},
586 {0, 0}
587 }
588 },
589 {1, 0, 3, 0, /* 0x4b */
590 {{0, 1},
591 {3, 3},
592 {6, 6},
593 {0, 0}
594 }
595 },
596 {0, 0, 2, 0, /* 0x4c */
597 {{2, 3},
598 {6, 6},
599 {0, 0},
600 {0, 0}
601 }
602 },
603 {1, 0, 3, 0, /* 0x4d */
604 {{0, 0},
605 {2, 3},
606 {6, 6},
607 {0, 0}
608 }
609 },
610 {0, 0, 2, 0, /* 0x4e */
611 {{1, 3},
612 {6, 6},
613 {0, 0},
614 {0, 0}
615 }
616 },
617 {1, 0, 2, 0, /* 0x4f */
618 {{0, 3},
619 {6, 6},
620 {0, 0},
621 {0, 0}
622 }
623 },
624 {0, 0, 2, 0, /* 0x50 */
625 {{4, 4},
626 {6, 6},
627 {0, 0},
628 {0, 0}
629 }
630 },
631 {1, 0, 3, 0, /* 0x51 */
632 {{0, 0},
633 {4, 4},
634 {6, 6},
635 {0, 0}
636 }
637 },
638 {0, 0, 3, 0, /* 0x52 */
639 {{1, 1},
640 {4, 4},
641 {6, 6},
642 {0, 0}
643 }
644 },
645 {1, 0, 3, 0, /* 0x53 */
646 {{0, 1},
647 {4, 4},
648 {6, 6},
649 {0, 0}
650 }
651 },
652 {0, 0, 3, 0, /* 0x54 */
653 {{2, 2},
654 {4, 4},
655 {6, 6},
656 {0, 0}
657 }
658 },
659 {1, 0, 4, 0, /* 0x55 */
660 {{0, 0},
661 {2, 2},
662 {4, 4},
663 {6, 6}
664 }
665 },
666 {0, 0, 3, 0, /* 0x56 */
667 {{1, 2},
668 {4, 4},
669 {6, 6},
670 {0, 0}
671 }
672 },
673 {1, 0, 3, 0, /* 0x57 */
674 {{0, 2},
675 {4, 4},
676 {6, 6},
677 {0, 0}
678 }
679 },
680 {0, 0, 2, 0, /* 0x58 */
681 {{3, 4},
682 {6, 6},
683 {0, 0},
684 {0, 0}
685 }
686 },
687 {1, 0, 3, 0, /* 0x59 */
688 {{0, 0},
689 {3, 4},
690 {6, 6},
691 {0, 0}
692 }
693 },
694 {0, 0, 3, 0, /* 0x5a */
695 {{1, 1},
696 {3, 4},
697 {6, 6},
698 {0, 0}
699 }
700 },
701 {1, 0, 3, 0, /* 0x5b */
702 {{0, 1},
703 {3, 4},
704 {6, 6},
705 {0, 0}
706 }
707 },
708 {0, 0, 2, 0, /* 0x5c */
709 {{2, 4},
710 {6, 6},
711 {0, 0},
712 {0, 0}
713 }
714 },
715 {1, 0, 3, 0, /* 0x5d */
716 {{0, 0},
717 {2, 4},
718 {6, 6},
719 {0, 0}
720 }
721 },
722 {0, 0, 2, 0, /* 0x5e */
723 {{1, 4},
724 {6, 6},
725 {0, 0},
726 {0, 0}
727 }
728 },
729 {1, 0, 2, 0, /* 0x5f */
730 {{0, 4},
731 {6, 6},
732 {0, 0},
733 {0, 0}
734 }
735 },
736 {0, 0, 1, 0, /* 0x60 */
737 {{5, 6},
738 {0, 0},
739 {0, 0},
740 {0, 0}
741 }
742 },
743 {1, 0, 2, 0, /* 0x61 */
744 {{0, 0},
745 {5, 6},
746 {0, 0},
747 {0, 0}
748 }
749 },
750 {0, 0, 2, 0, /* 0x62 */
751 {{1, 1},
752 {5, 6},
753 {0, 0},
754 {0, 0}
755 }
756 },
757 {1, 0, 2, 0, /* 0x63 */
758 {{0, 1},
759 {5, 6},
760 {0, 0},
761 {0, 0}
762 }
763 },
764 {0, 0, 2, 0, /* 0x64 */
765 {{2, 2},
766 {5, 6},
767 {0, 0},
768 {0, 0}
769 }
770 },
771 {1, 0, 3, 0, /* 0x65 */
772 {{0, 0},
773 {2, 2},
774 {5, 6},
775 {0, 0}
776 }
777 },
778 {0, 0, 2, 0, /* 0x66 */
779 {{1, 2},
780 {5, 6},
781 {0, 0},
782 {0, 0}
783 }
784 },
785 {1, 0, 2, 0, /* 0x67 */
786 {{0, 2},
787 {5, 6},
788 {0, 0},
789 {0, 0}
790 }
791 },
792 {0, 0, 2, 0, /* 0x68 */
793 {{3, 3},
794 {5, 6},
795 {0, 0},
796 {0, 0}
797 }
798 },
799 {1, 0, 3, 0, /* 0x69 */
800 {{0, 0},
801 {3, 3},
802 {5, 6},
803 {0, 0}
804 }
805 },
806 {0, 0, 3, 0, /* 0x6a */
807 {{1, 1},
808 {3, 3},
809 {5, 6},
810 {0, 0}
811 }
812 },
813 {1, 0, 3, 0, /* 0x6b */
814 {{0, 1},
815 {3, 3},
816 {5, 6},
817 {0, 0}
818 }
819 },
820 {0, 0, 2, 0, /* 0x6c */
821 {{2, 3},
822 {5, 6},
823 {0, 0},
824 {0, 0}
825 }
826 },
827 {1, 0, 3, 0, /* 0x6d */
828 {{0, 0},
829 {2, 3},
830 {5, 6},
831 {0, 0}
832 }
833 },
834 {0, 0, 2, 0, /* 0x6e */
835 {{1, 3},
836 {5, 6},
837 {0, 0},
838 {0, 0}
839 }
840 },
841 {1, 0, 2, 0, /* 0x6f */
842 {{0, 3},
843 {5, 6},
844 {0, 0},
845 {0, 0}
846 }
847 },
848 {0, 0, 1, 0, /* 0x70 */
849 {{4, 6},
850 {0, 0},
851 {0, 0},
852 {0, 0}
853 }
854 },
855 {1, 0, 2, 0, /* 0x71 */
856 {{0, 0},
857 {4, 6},
858 {0, 0},
859 {0, 0}
860 }
861 },
862 {0, 0, 2, 0, /* 0x72 */
863 {{1, 1},
864 {4, 6},
865 {0, 0},
866 {0, 0}
867 }
868 },
869 {1, 0, 2, 0, /* 0x73 */
870 {{0, 1},
871 {4, 6},
872 {0, 0},
873 {0, 0}
874 }
875 },
876 {0, 0, 2, 0, /* 0x74 */
877 {{2, 2},
878 {4, 6},
879 {0, 0},
880 {0, 0}
881 }
882 },
883 {1, 0, 3, 0, /* 0x75 */
884 {{0, 0},
885 {2, 2},
886 {4, 6},
887 {0, 0}
888 }
889 },
890 {0, 0, 2, 0, /* 0x76 */
891 {{1, 2},
892 {4, 6},
893 {0, 0},
894 {0, 0}
895 }
896 },
897 {1, 0, 2, 0, /* 0x77 */
898 {{0, 2},
899 {4, 6},
900 {0, 0},
901 {0, 0}
902 }
903 },
904 {0, 0, 1, 0, /* 0x78 */
905 {{3, 6},
906 {0, 0},
907 {0, 0},
908 {0, 0}
909 }
910 },
911 {1, 0, 2, 0, /* 0x79 */
912 {{0, 0},
913 {3, 6},
914 {0, 0},
915 {0, 0}
916 }
917 },
918 {0, 0, 2, 0, /* 0x7a */
919 {{1, 1},
920 {3, 6},
921 {0, 0},
922 {0, 0}
923 }
924 },
925 {1, 0, 2, 0, /* 0x7b */
926 {{0, 1},
927 {3, 6},
928 {0, 0},
929 {0, 0}
930 }
931 },
932 {0, 0, 1, 0, /* 0x7c */
933 {{2, 6},
934 {0, 0},
935 {0, 0},
936 {0, 0}
937 }
938 },
939 {1, 0, 2, 0, /* 0x7d */
940 {{0, 0},
941 {2, 6},
942 {0, 0},
943 {0, 0}
944 }
945 },
946 {0, 0, 1, 0, /* 0x7e */
947 {{1, 6},
948 {0, 0},
949 {0, 0},
950 {0, 0}
951 }
952 },
953 {1, 0, 1, 0, /* 0x7f */
954 {{0, 6},
955 {0, 0},
956 {0, 0},
957 {0, 0}
958 }
959 },
960 {0, 1, 1, 0, /* 0x80 */
961 {{7, 7},
962 {0, 0},
963 {0, 0},
964 {0, 0}
965 }
966 },
967 {1, 1, 2, 0, /* 0x81 */
968 {{0, 0},
969 {7, 7},
970 {0, 0},
971 {0, 0}
972 }
973 },
974 {0, 1, 2, 0, /* 0x82 */
975 {{1, 1},
976 {7, 7},
977 {0, 0},
978 {0, 0}
979 }
980 },
981 {1, 1, 2, 0, /* 0x83 */
982 {{0, 1},
983 {7, 7},
984 {0, 0},
985 {0, 0}
986 }
987 },
988 {0, 1, 2, 0, /* 0x84 */
989 {{2, 2},
990 {7, 7},
991 {0, 0},
992 {0, 0}
993 }
994 },
995 {1, 1, 3, 0, /* 0x85 */
996 {{0, 0},
997 {2, 2},
998 {7, 7},
999 {0, 0}
1000 }
1001 },
1002 {0, 1, 2, 0, /* 0x86 */
1003 {{1, 2},
1004 {7, 7},
1005 {0, 0},
1006 {0, 0}
1007 }
1008 },
1009 {1, 1, 2, 0, /* 0x87 */
1010 {{0, 2},
1011 {7, 7},
1012 {0, 0},
1013 {0, 0}
1014 }
1015 },
1016 {0, 1, 2, 0, /* 0x88 */
1017 {{3, 3},
1018 {7, 7},
1019 {0, 0},
1020 {0, 0}
1021 }
1022 },
1023 {1, 1, 3, 0, /* 0x89 */
1024 {{0, 0},
1025 {3, 3},
1026 {7, 7},
1027 {0, 0}
1028 }
1029 },
1030 {0, 1, 3, 0, /* 0x8a */
1031 {{1, 1},
1032 {3, 3},
1033 {7, 7},
1034 {0, 0}
1035 }
1036 },
1037 {1, 1, 3, 0, /* 0x8b */
1038 {{0, 1},
1039 {3, 3},
1040 {7, 7},
1041 {0, 0}
1042 }
1043 },
1044 {0, 1, 2, 0, /* 0x8c */
1045 {{2, 3},
1046 {7, 7},
1047 {0, 0},
1048 {0, 0}
1049 }
1050 },
1051 {1, 1, 3, 0, /* 0x8d */
1052 {{0, 0},
1053 {2, 3},
1054 {7, 7},
1055 {0, 0}
1056 }
1057 },
1058 {0, 1, 2, 0, /* 0x8e */
1059 {{1, 3},
1060 {7, 7},
1061 {0, 0},
1062 {0, 0}
1063 }
1064 },
1065 {1, 1, 2, 0, /* 0x8f */
1066 {{0, 3},
1067 {7, 7},
1068 {0, 0},
1069 {0, 0}
1070 }
1071 },
1072 {0, 1, 2, 0, /* 0x90 */
1073 {{4, 4},
1074 {7, 7},
1075 {0, 0},
1076 {0, 0}
1077 }
1078 },
1079 {1, 1, 3, 0, /* 0x91 */
1080 {{0, 0},
1081 {4, 4},
1082 {7, 7},
1083 {0, 0}
1084 }
1085 },
1086 {0, 1, 3, 0, /* 0x92 */
1087 {{1, 1},
1088 {4, 4},
1089 {7, 7},
1090 {0, 0}
1091 }
1092 },
1093 {1, 1, 3, 0, /* 0x93 */
1094 {{0, 1},
1095 {4, 4},
1096 {7, 7},
1097 {0, 0}
1098 }
1099 },
1100 {0, 1, 3, 0, /* 0x94 */
1101 {{2, 2},
1102 {4, 4},
1103 {7, 7},
1104 {0, 0}
1105 }
1106 },
1107 {1, 1, 4, 0, /* 0x95 */
1108 {{0, 0},
1109 {2, 2},
1110 {4, 4},
1111 {7, 7}
1112 }
1113 },
1114 {0, 1, 3, 0, /* 0x96 */
1115 {{1, 2},
1116 {4, 4},
1117 {7, 7},
1118 {0, 0}
1119 }
1120 },
1121 {1, 1, 3, 0, /* 0x97 */
1122 {{0, 2},
1123 {4, 4},
1124 {7, 7},
1125 {0, 0}
1126 }
1127 },
1128 {0, 1, 2, 0, /* 0x98 */
1129 {{3, 4},
1130 {7, 7},
1131 {0, 0},
1132 {0, 0}
1133 }
1134 },
1135 {1, 1, 3, 0, /* 0x99 */
1136 {{0, 0},
1137 {3, 4},
1138 {7, 7},
1139 {0, 0}
1140 }
1141 },
1142 {0, 1, 3, 0, /* 0x9a */
1143 {{1, 1},
1144 {3, 4},
1145 {7, 7},
1146 {0, 0}
1147 }
1148 },
1149 {1, 1, 3, 0, /* 0x9b */
1150 {{0, 1},
1151 {3, 4},
1152 {7, 7},
1153 {0, 0}
1154 }
1155 },
1156 {0, 1, 2, 0, /* 0x9c */
1157 {{2, 4},
1158 {7, 7},
1159 {0, 0},
1160 {0, 0}
1161 }
1162 },
1163 {1, 1, 3, 0, /* 0x9d */
1164 {{0, 0},
1165 {2, 4},
1166 {7, 7},
1167 {0, 0}
1168 }
1169 },
1170 {0, 1, 2, 0, /* 0x9e */
1171 {{1, 4},
1172 {7, 7},
1173 {0, 0},
1174 {0, 0}
1175 }
1176 },
1177 {1, 1, 2, 0, /* 0x9f */
1178 {{0, 4},
1179 {7, 7},
1180 {0, 0},
1181 {0, 0}
1182 }
1183 },
1184 {0, 1, 2, 0, /* 0xa0 */
1185 {{5, 5},
1186 {7, 7},
1187 {0, 0},
1188 {0, 0}
1189 }
1190 },
1191 {1, 1, 3, 0, /* 0xa1 */
1192 {{0, 0},
1193 {5, 5},
1194 {7, 7},
1195 {0, 0}
1196 }
1197 },
1198 {0, 1, 3, 0, /* 0xa2 */
1199 {{1, 1},
1200 {5, 5},
1201 {7, 7},
1202 {0, 0}
1203 }
1204 },
1205 {1, 1, 3, 0, /* 0xa3 */
1206 {{0, 1},
1207 {5, 5},
1208 {7, 7},
1209 {0, 0}
1210 }
1211 },
1212 {0, 1, 3, 0, /* 0xa4 */
1213 {{2, 2},
1214 {5, 5},
1215 {7, 7},
1216 {0, 0}
1217 }
1218 },
1219 {1, 1, 4, 0, /* 0xa5 */
1220 {{0, 0},
1221 {2, 2},
1222 {5, 5},
1223 {7, 7}
1224 }
1225 },
1226 {0, 1, 3, 0, /* 0xa6 */
1227 {{1, 2},
1228 {5, 5},
1229 {7, 7},
1230 {0, 0}
1231 }
1232 },
1233 {1, 1, 3, 0, /* 0xa7 */
1234 {{0, 2},
1235 {5, 5},
1236 {7, 7},
1237 {0, 0}
1238 }
1239 },
1240 {0, 1, 3, 0, /* 0xa8 */
1241 {{3, 3},
1242 {5, 5},
1243 {7, 7},
1244 {0, 0}
1245 }
1246 },
1247 {1, 1, 4, 0, /* 0xa9 */
1248 {{0, 0},
1249 {3, 3},
1250 {5, 5},
1251 {7, 7}
1252 }
1253 },
1254 {0, 1, 4, 0, /* 0xaa */
1255 {{1, 1},
1256 {3, 3},
1257 {5, 5},
1258 {7, 7}
1259 }
1260 },
1261 {1, 1, 4, 0, /* 0xab */
1262 {{0, 1},
1263 {3, 3},
1264 {5, 5},
1265 {7, 7}
1266 }
1267 },
1268 {0, 1, 3, 0, /* 0xac */
1269 {{2, 3},
1270 {5, 5},
1271 {7, 7},
1272 {0, 0}
1273 }
1274 },
1275 {1, 1, 4, 0, /* 0xad */
1276 {{0, 0},
1277 {2, 3},
1278 {5, 5},
1279 {7, 7}
1280 }
1281 },
1282 {0, 1, 3, 0, /* 0xae */
1283 {{1, 3},
1284 {5, 5},
1285 {7, 7},
1286 {0, 0}
1287 }
1288 },
1289 {1, 1, 3, 0, /* 0xaf */
1290 {{0, 3},
1291 {5, 5},
1292 {7, 7},
1293 {0, 0}
1294 }
1295 },
1296 {0, 1, 2, 0, /* 0xb0 */
1297 {{4, 5},
1298 {7, 7},
1299 {0, 0},
1300 {0, 0}
1301 }
1302 },
1303 {1, 1, 3, 0, /* 0xb1 */
1304 {{0, 0},
1305 {4, 5},
1306 {7, 7},
1307 {0, 0}
1308 }
1309 },
1310 {0, 1, 3, 0, /* 0xb2 */
1311 {{1, 1},
1312 {4, 5},
1313 {7, 7},
1314 {0, 0}
1315 }
1316 },
1317 {1, 1, 3, 0, /* 0xb3 */
1318 {{0, 1},
1319 {4, 5},
1320 {7, 7},
1321 {0, 0}
1322 }
1323 },
1324 {0, 1, 3, 0, /* 0xb4 */
1325 {{2, 2},
1326 {4, 5},
1327 {7, 7},
1328 {0, 0}
1329 }
1330 },
1331 {1, 1, 4, 0, /* 0xb5 */
1332 {{0, 0},
1333 {2, 2},
1334 {4, 5},
1335 {7, 7}
1336 }
1337 },
1338 {0, 1, 3, 0, /* 0xb6 */
1339 {{1, 2},
1340 {4, 5},
1341 {7, 7},
1342 {0, 0}
1343 }
1344 },
1345 {1, 1, 3, 0, /* 0xb7 */
1346 {{0, 2},
1347 {4, 5},
1348 {7, 7},
1349 {0, 0}
1350 }
1351 },
1352 {0, 1, 2, 0, /* 0xb8 */
1353 {{3, 5},
1354 {7, 7},
1355 {0, 0},
1356 {0, 0}
1357 }
1358 },
1359 {1, 1, 3, 0, /* 0xb9 */
1360 {{0, 0},
1361 {3, 5},
1362 {7, 7},
1363 {0, 0}
1364 }
1365 },
1366 {0, 1, 3, 0, /* 0xba */
1367 {{1, 1},
1368 {3, 5},
1369 {7, 7},
1370 {0, 0}
1371 }
1372 },
1373 {1, 1, 3, 0, /* 0xbb */
1374 {{0, 1},
1375 {3, 5},
1376 {7, 7},
1377 {0, 0}
1378 }
1379 },
1380 {0, 1, 2, 0, /* 0xbc */
1381 {{2, 5},
1382 {7, 7},
1383 {0, 0},
1384 {0, 0}
1385 }
1386 },
1387 {1, 1, 3, 0, /* 0xbd */
1388 {{0, 0},
1389 {2, 5},
1390 {7, 7},
1391 {0, 0}
1392 }
1393 },
1394 {0, 1, 2, 0, /* 0xbe */
1395 {{1, 5},
1396 {7, 7},
1397 {0, 0},
1398 {0, 0}
1399 }
1400 },
1401 {1, 1, 2, 0, /* 0xbf */
1402 {{0, 5},
1403 {7, 7},
1404 {0, 0},
1405 {0, 0}
1406 }
1407 },
1408 {0, 1, 1, 0, /* 0xc0 */
1409 {{6, 7},
1410 {0, 0},
1411 {0, 0},
1412 {0, 0}
1413 }
1414 },
1415 {1, 1, 2, 0, /* 0xc1 */
1416 {{0, 0},
1417 {6, 7},
1418 {0, 0},
1419 {0, 0}
1420 }
1421 },
1422 {0, 1, 2, 0, /* 0xc2 */
1423 {{1, 1},
1424 {6, 7},
1425 {0, 0},
1426 {0, 0}
1427 }
1428 },
1429 {1, 1, 2, 0, /* 0xc3 */
1430 {{0, 1},
1431 {6, 7},
1432 {0, 0},
1433 {0, 0}
1434 }
1435 },
1436 {0, 1, 2, 0, /* 0xc4 */
1437 {{2, 2},
1438 {6, 7},
1439 {0, 0},
1440 {0, 0}
1441 }
1442 },
1443 {1, 1, 3, 0, /* 0xc5 */
1444 {{0, 0},
1445 {2, 2},
1446 {6, 7},
1447 {0, 0}
1448 }
1449 },
1450 {0, 1, 2, 0, /* 0xc6 */
1451 {{1, 2},
1452 {6, 7},
1453 {0, 0},
1454 {0, 0}
1455 }
1456 },
1457 {1, 1, 2, 0, /* 0xc7 */
1458 {{0, 2},
1459 {6, 7},
1460 {0, 0},
1461 {0, 0}
1462 }
1463 },
1464 {0, 1, 2, 0, /* 0xc8 */
1465 {{3, 3},
1466 {6, 7},
1467 {0, 0},
1468 {0, 0}
1469 }
1470 },
1471 {1, 1, 3, 0, /* 0xc9 */
1472 {{0, 0},
1473 {3, 3},
1474 {6, 7},
1475 {0, 0}
1476 }
1477 },
1478 {0, 1, 3, 0, /* 0xca */
1479 {{1, 1},
1480 {3, 3},
1481 {6, 7},
1482 {0, 0}
1483 }
1484 },
1485 {1, 1, 3, 0, /* 0xcb */
1486 {{0, 1},
1487 {3, 3},
1488 {6, 7},
1489 {0, 0}
1490 }
1491 },
1492 {0, 1, 2, 0, /* 0xcc */
1493 {{2, 3},
1494 {6, 7},
1495 {0, 0},
1496 {0, 0}
1497 }
1498 },
1499 {1, 1, 3, 0, /* 0xcd */
1500 {{0, 0},
1501 {2, 3},
1502 {6, 7},
1503 {0, 0}
1504 }
1505 },
1506 {0, 1, 2, 0, /* 0xce */
1507 {{1, 3},
1508 {6, 7},
1509 {0, 0},
1510 {0, 0}
1511 }
1512 },
1513 {1, 1, 2, 0, /* 0xcf */
1514 {{0, 3},
1515 {6, 7},
1516 {0, 0},
1517 {0, 0}
1518 }
1519 },
1520 {0, 1, 2, 0, /* 0xd0 */
1521 {{4, 4},
1522 {6, 7},
1523 {0, 0},
1524 {0, 0}
1525 }
1526 },
1527 {1, 1, 3, 0, /* 0xd1 */
1528 {{0, 0},
1529 {4, 4},
1530 {6, 7},
1531 {0, 0}
1532 }
1533 },
1534 {0, 1, 3, 0, /* 0xd2 */
1535 {{1, 1},
1536 {4, 4},
1537 {6, 7},
1538 {0, 0}
1539 }
1540 },
1541 {1, 1, 3, 0, /* 0xd3 */
1542 {{0, 1},
1543 {4, 4},
1544 {6, 7},
1545 {0, 0}
1546 }
1547 },
1548 {0, 1, 3, 0, /* 0xd4 */
1549 {{2, 2},
1550 {4, 4},
1551 {6, 7},
1552 {0, 0}
1553 }
1554 },
1555 {1, 1, 4, 0, /* 0xd5 */
1556 {{0, 0},
1557 {2, 2},
1558 {4, 4},
1559 {6, 7}
1560 }
1561 },
1562 {0, 1, 3, 0, /* 0xd6 */
1563 {{1, 2},
1564 {4, 4},
1565 {6, 7},
1566 {0, 0}
1567 }
1568 },
1569 {1, 1, 3, 0, /* 0xd7 */
1570 {{0, 2},
1571 {4, 4},
1572 {6, 7},
1573 {0, 0}
1574 }
1575 },
1576 {0, 1, 2, 0, /* 0xd8 */
1577 {{3, 4},
1578 {6, 7},
1579 {0, 0},
1580 {0, 0}
1581 }
1582 },
1583 {1, 1, 3, 0, /* 0xd9 */
1584 {{0, 0},
1585 {3, 4},
1586 {6, 7},
1587 {0, 0}
1588 }
1589 },
1590 {0, 1, 3, 0, /* 0xda */
1591 {{1, 1},
1592 {3, 4},
1593 {6, 7},
1594 {0, 0}
1595 }
1596 },
1597 {1, 1, 3, 0, /* 0xdb */
1598 {{0, 1},
1599 {3, 4},
1600 {6, 7},
1601 {0, 0}
1602 }
1603 },
1604 {0, 1, 2, 0, /* 0xdc */
1605 {{2, 4},
1606 {6, 7},
1607 {0, 0},
1608 {0, 0}
1609 }
1610 },
1611 {1, 1, 3, 0, /* 0xdd */
1612 {{0, 0},
1613 {2, 4},
1614 {6, 7},
1615 {0, 0}
1616 }
1617 },
1618 {0, 1, 2, 0, /* 0xde */
1619 {{1, 4},
1620 {6, 7},
1621 {0, 0},
1622 {0, 0}
1623 }
1624 },
1625 {1, 1, 2, 0, /* 0xdf */
1626 {{0, 4},
1627 {6, 7},
1628 {0, 0},
1629 {0, 0}
1630 }
1631 },
1632 {0, 1, 1, 0, /* 0xe0 */
1633 {{5, 7},
1634 {0, 0},
1635 {0, 0},
1636 {0, 0}
1637 }
1638 },
1639 {1, 1, 2, 0, /* 0xe1 */
1640 {{0, 0},
1641 {5, 7},
1642 {0, 0},
1643 {0, 0}
1644 }
1645 },
1646 {0, 1, 2, 0, /* 0xe2 */
1647 {{1, 1},
1648 {5, 7},
1649 {0, 0},
1650 {0, 0}
1651 }
1652 },
1653 {1, 1, 2, 0, /* 0xe3 */
1654 {{0, 1},
1655 {5, 7},
1656 {0, 0},
1657 {0, 0}
1658 }
1659 },
1660 {0, 1, 2, 0, /* 0xe4 */
1661 {{2, 2},
1662 {5, 7},
1663 {0, 0},
1664 {0, 0}
1665 }
1666 },
1667 {1, 1, 3, 0, /* 0xe5 */
1668 {{0, 0},
1669 {2, 2},
1670 {5, 7},
1671 {0, 0}
1672 }
1673 },
1674 {0, 1, 2, 0, /* 0xe6 */
1675 {{1, 2},
1676 {5, 7},
1677 {0, 0},
1678 {0, 0}
1679 }
1680 },
1681 {1, 1, 2, 0, /* 0xe7 */
1682 {{0, 2},
1683 {5, 7},
1684 {0, 0},
1685 {0, 0}
1686 }
1687 },
1688 {0, 1, 2, 0, /* 0xe8 */
1689 {{3, 3},
1690 {5, 7},
1691 {0, 0},
1692 {0, 0}
1693 }
1694 },
1695 {1, 1, 3, 0, /* 0xe9 */
1696 {{0, 0},
1697 {3, 3},
1698 {5, 7},
1699 {0, 0}
1700 }
1701 },
1702 {0, 1, 3, 0, /* 0xea */
1703 {{1, 1},
1704 {3, 3},
1705 {5, 7},
1706 {0, 0}
1707 }
1708 },
1709 {1, 1, 3, 0, /* 0xeb */
1710 {{0, 1},
1711 {3, 3},
1712 {5, 7},
1713 {0, 0}
1714 }
1715 },
1716 {0, 1, 2, 0, /* 0xec */
1717 {{2, 3},
1718 {5, 7},
1719 {0, 0},
1720 {0, 0}
1721 }
1722 },
1723 {1, 1, 3, 0, /* 0xed */
1724 {{0, 0},
1725 {2, 3},
1726 {5, 7},
1727 {0, 0}
1728 }
1729 },
1730 {0, 1, 2, 0, /* 0xee */
1731 {{1, 3},
1732 {5, 7},
1733 {0, 0},
1734 {0, 0}
1735 }
1736 },
1737 {1, 1, 2, 0, /* 0xef */
1738 {{0, 3},
1739 {5, 7},
1740 {0, 0},
1741 {0, 0}
1742 }
1743 },
1744 {0, 1, 1, 0, /* 0xf0 */
1745 {{4, 7},
1746 {0, 0},
1747 {0, 0},
1748 {0, 0}
1749 }
1750 },
1751 {1, 1, 2, 0, /* 0xf1 */
1752 {{0, 0},
1753 {4, 7},
1754 {0, 0},
1755 {0, 0}
1756 }
1757 },
1758 {0, 1, 2, 0, /* 0xf2 */
1759 {{1, 1},
1760 {4, 7},
1761 {0, 0},
1762 {0, 0}
1763 }
1764 },
1765 {1, 1, 2, 0, /* 0xf3 */
1766 {{0, 1},
1767 {4, 7},
1768 {0, 0},
1769 {0, 0}
1770 }
1771 },
1772 {0, 1, 2, 0, /* 0xf4 */
1773 {{2, 2},
1774 {4, 7},
1775 {0, 0},
1776 {0, 0}
1777 }
1778 },
1779 {1, 1, 3, 0, /* 0xf5 */
1780 {{0, 0},
1781 {2, 2},
1782 {4, 7},
1783 {0, 0}
1784 }
1785 },
1786 {0, 1, 2, 0, /* 0xf6 */
1787 {{1, 2},
1788 {4, 7},
1789 {0, 0},
1790 {0, 0}
1791 }
1792 },
1793 {1, 1, 2, 0, /* 0xf7 */
1794 {{0, 2},
1795 {4, 7},
1796 {0, 0},
1797 {0, 0}
1798 }
1799 },
1800 {0, 1, 1, 0, /* 0xf8 */
1801 {{3, 7},
1802 {0, 0},
1803 {0, 0},
1804 {0, 0}
1805 }
1806 },
1807 {1, 1, 2, 0, /* 0xf9 */
1808 {{0, 0},
1809 {3, 7},
1810 {0, 0},
1811 {0, 0}
1812 }
1813 },
1814 {0, 1, 2, 0, /* 0xfa */
1815 {{1, 1},
1816 {3, 7},
1817 {0, 0},
1818 {0, 0}
1819 }
1820 },
1821 {1, 1, 2, 0, /* 0xfb */
1822 {{0, 1},
1823 {3, 7},
1824 {0, 0},
1825 {0, 0}
1826 }
1827 },
1828 {0, 1, 1, 0, /* 0xfc */
1829 {{2, 7},
1830 {0, 0},
1831 {0, 0},
1832 {0, 0}
1833 }
1834 },
1835 {1, 1, 2, 0, /* 0xfd */
1836 {{0, 0},
1837 {2, 7},
1838 {0, 0},
1839 {0, 0}
1840 }
1841 },
1842 {0, 1, 1, 0, /* 0xfe */
1843 {{1, 7},
1844 {0, 0},
1845 {0, 0},
1846 {0, 0}
1847 }
1848 },
1849 {1, 1, 1, 0, /* 0xff */
1850 {{0, 7},
1851 {0, 0},
1852 {0, 0},
1853 {0, 0}
1854 }
1855 }
1856};
1857
1858
1859int
1860sctp_is_address_in_scope(struct sctp_ifa *ifa,
1861 int ipv4_addr_legal,
1862 int ipv6_addr_legal,
1863 int loopback_scope,
1864 int ipv4_local_scope,
1865 int local_scope,
1866 int site_scope,
1867 int do_update)
1868{
1869 if ((loopback_scope == 0) &&
1870 (ifa->ifn_p) && SCTP_IFN_IS_IFT_LOOP(ifa->ifn_p)) {
1871 /*
1872 * skip loopback if not in scope *
1873 */
1874 return (0);
1875 }
1876 if ((ifa->address.sa.sa_family == AF_INET) && ipv4_addr_legal) {
1877 struct sockaddr_in *sin;
1863
1878
1879 sin = (struct sockaddr_in *)&ifa->address.sin;
1880 if (sin->sin_addr.s_addr == 0) {
1881 /* not in scope , unspecified */
1882 return (0);
1883 }
1884 if ((ipv4_local_scope == 0) &&
1885 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1886 /* private address not in scope */
1887 return (0);
1888 }
1889 } else if ((ifa->address.sa.sa_family == AF_INET6) && ipv6_addr_legal) {
1890 struct sockaddr_in6 *sin6;
1864
1891
1865extern int sctp_peer_chunk_oh;
1892 /*
1893 * Must update the flags, bummer, which means any IFA locks
1894 * must now be applied HERE <->
1895 */
1896 if (do_update) {
1897 sctp_gather_internal_ifa_flags(ifa);
1898 }
1899 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1900 return (0);
1901 }
1902 /* ok to use deprecated addresses? */
1903 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1904 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1905 /* skip unspecifed addresses */
1906 return (0);
1907 }
1908 if ( /* (local_scope == 0) && */
1909 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
1910 return (0);
1911 }
1912 if ((site_scope == 0) &&
1913 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1914 return (0);
1915 }
1916 } else {
1917 return (0);
1918 }
1919 return (1);
1920}
1866
1921
1922static struct mbuf *
1923sctp_add_addr_to_mbuf(struct mbuf *m, struct sctp_ifa *ifa)
1924{
1925 struct sctp_paramhdr *parmh;
1926 struct mbuf *mret;
1927 int len;
1928
1929 if (ifa->address.sa.sa_family == AF_INET) {
1930 len = sizeof(struct sctp_ipv4addr_param);
1931 } else if (ifa->address.sa.sa_family == AF_INET6) {
1932 len = sizeof(struct sctp_ipv6addr_param);
1933 } else {
1934 /* unknown type */
1935 return (m);
1936 }
1937 if (M_TRAILINGSPACE(m) >= len) {
1938 /* easy side we just drop it on the end */
1939 parmh = (struct sctp_paramhdr *)(SCTP_BUF_AT(m, SCTP_BUF_LEN(m)));
1940 mret = m;
1941 } else {
1942 /* Need more space */
1943 mret = m;
1944 while (SCTP_BUF_NEXT(mret) != NULL) {
1945 mret = SCTP_BUF_NEXT(mret);
1946 }
1947 SCTP_BUF_NEXT(mret) = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
1948 if (SCTP_BUF_NEXT(mret) == NULL) {
1949 /* We are hosed, can't add more addresses */
1950 return (m);
1951 }
1952 mret = SCTP_BUF_NEXT(mret);
1953 parmh = mtod(mret, struct sctp_paramhdr *);
1954 }
1955 /* now add the parameter */
1956 if (ifa->address.sa.sa_family == AF_INET) {
1957 struct sctp_ipv4addr_param *ipv4p;
1958 struct sockaddr_in *sin;
1959
1960 sin = (struct sockaddr_in *)&ifa->address.sin;
1961 ipv4p = (struct sctp_ipv4addr_param *)parmh;
1962 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
1963 parmh->param_length = htons(len);
1964 ipv4p->addr = sin->sin_addr.s_addr;
1965 SCTP_BUF_LEN(mret) += len;
1966 } else if (ifa->address.sa.sa_family == AF_INET6) {
1967 struct sctp_ipv6addr_param *ipv6p;
1968 struct sockaddr_in6 *sin6;
1969
1970 sin6 = (struct sockaddr_in6 *)&ifa->address.sin6;
1971 ipv6p = (struct sctp_ipv6addr_param *)parmh;
1972 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
1973 parmh->param_length = htons(len);
1974 memcpy(ipv6p->addr, &sin6->sin6_addr,
1975 sizeof(ipv6p->addr));
1976 /* clear embedded scope in the address */
1977 in6_clearscope((struct in6_addr *)ipv6p->addr);
1978 SCTP_BUF_LEN(mret) += len;
1979 } else {
1980 return (m);
1981 }
1982 return (mret);
1983}
1984
1985
/*
 * Build the list of local-address parameters for an INIT/INIT-ACK by
 * appending them to the mbuf chain at m_at.  cnt_inits_to seeds the
 * address count (addresses already placed by the caller).  Two modes:
 * a bound-all endpoint walks the VRF's interface lists, a specifically
 * bound endpoint walks the endpoint's own address list.  In both modes a
 * first pass only counts in-scope addresses and a second pass actually
 * emits them, because addresses are listed only when more than one would
 * appear (see the NAT comment below).  Returns the (possibly extended)
 * chain tail; the caller keeps ownership of the chain.
 */
struct mbuf *
sctp_add_addresses_to_i_ia(struct sctp_inpcb *inp, struct sctp_scoping *scope,
    struct mbuf *m_at, int cnt_inits_to)
{
	struct sctp_vrf *vrf = NULL;
	int cnt, limit_out = 0, total_count;
	uint32_t vrf_id;

	vrf_id = SCTP_DEFAULT_VRFID;
	/* the address lists are protected by the global address lock */
	SCTP_IPI_ADDR_LOCK();
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		SCTP_IPI_ADDR_UNLOCK();
		return (m_at);
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		struct sctp_ifa *sctp_ifap;
		struct sctp_ifn *sctp_ifnp;

		cnt = cnt_inits_to;
		if (vrf->total_ifa_count > SCTP_COUNT_LIMIT) {
			/* too many addresses to count exactly; cap output */
			limit_out = 1;
			cnt = SCTP_ADDRESS_LIMIT;
			goto skip_count;
		}
		/* pass 1: count the in-scope addresses (do_update = 1) */
		LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
			if ((scope->loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
				/*
				 * Skip loopback devices if loopback_scope
				 * not set
				 */
				continue;
			}
			LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
				if (sctp_is_address_in_scope(sctp_ifap,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 1) == 0) {
					continue;
				}
				cnt++;
				if (cnt > SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
			if (cnt > SCTP_ADDRESS_LIMIT) {
				break;
			}
		}
skip_count:
		/* pass 2: emit addresses, only if more than one is in scope */
		if (cnt > 1) {
			total_count = 0;
			LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
				cnt = 0;
				if ((scope->loopback_scope == 0) &&
				    SCTP_IFN_IS_IFT_LOOP(sctp_ifnp)) {
					/*
					 * Skip loopback devices if
					 * loopback_scope not set
					 */
					continue;
				}
				LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
					if (sctp_is_address_in_scope(sctp_ifap,
					    scope->ipv4_addr_legal,
					    scope->ipv6_addr_legal,
					    scope->loopback_scope,
					    scope->ipv4_local_scope,
					    scope->local_scope,
					    scope->site_scope, 0) == 0) {
						continue;
					}
					m_at = sctp_add_addr_to_mbuf(m_at, sctp_ifap);
					if (limit_out) {
						cnt++;
						total_count++;
						if (cnt >= 2) {
							/*
							 * two from each
							 * address
							 */
							break;
						}
						if (total_count > SCTP_ADDRESS_LIMIT) {
							/* No more addresses */
							break;
						}
					}
				}
			}
		}
	} else {
		struct sctp_laddr *laddr;

		cnt = cnt_inits_to;
		/* First, how many ? */
		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
			if (laddr->ifa == NULL) {
				continue;
			}
			if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
				/*
				 * Address being deleted by the system, dont
				 * list.
				 */
				continue;
			if (laddr->action == SCTP_DEL_IP_ADDRESS) {
				/*
				 * Address being deleted on this ep don't
				 * list.
				 */
				continue;
			}
			if (sctp_is_address_in_scope(laddr->ifa,
			    scope->ipv4_addr_legal,
			    scope->ipv6_addr_legal,
			    scope->loopback_scope,
			    scope->ipv4_local_scope,
			    scope->local_scope,
			    scope->site_scope, 1) == 0) {
				continue;
			}
			cnt++;
		}
		if (cnt > SCTP_ADDRESS_LIMIT) {
			limit_out = 1;
		}
		/*
		 * To get through a NAT we only list addresses if we have
		 * more than one. That way if you just bind a single address
		 * we let the source of the init dictate our address.
		 */
		if (cnt > 1) {
			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
				cnt = 0;
				if (laddr->ifa == NULL) {
					continue;
				}
				if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED)
					continue;

				if (sctp_is_address_in_scope(laddr->ifa,
				    scope->ipv4_addr_legal,
				    scope->ipv6_addr_legal,
				    scope->loopback_scope,
				    scope->ipv4_local_scope,
				    scope->local_scope,
				    scope->site_scope, 0) == 0) {
					continue;
				}
				m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
				cnt++;
				if (cnt >= SCTP_ADDRESS_LIMIT) {
					break;
				}
			}
		}
	}
	SCTP_IPI_ADDR_UNLOCK();
	return (m_at);
}
2151
2152static struct sctp_ifa *
2153sctp_is_ifa_addr_prefered(struct sctp_ifa *ifa,
2154 uint8_t dest_is_loop,
2155 uint8_t dest_is_priv,
2156 sa_family_t fam)
2157{
2158 uint8_t dest_is_global = 0;
2159
2160 /*
2161 * is_scope -> dest_is_priv is true if destination is a private
2162 * address
2163 */
2164 /* dest_is_loop is true if destination is a loopback addresses */
2165
2166 /*
2167 * Here we determine if its a prefered address. A prefered address
2168 * means it is the same scope or higher scope then the destination.
2169 * L = loopback, P = private, G = global
2170 * ----------------------------------------- src | dest | result
2171 * ---------------------------------------- L | L | yes
2172 * ----------------------------------------- P | L |
2173 * yes-v4 no-v6 ----------------------------------------- G |
2174 * L | yes-v4 no-v6 ----------------------------------------- L
2175 * | P | no ----------------------------------------- P |
2176 * P | yes ----------------------------------------- G |
2177 * P | no ----------------------------------------- L | G
2178 * | no ----------------------------------------- P | G |
2179 * no ----------------------------------------- G | G |
2180 * yes -----------------------------------------
2181 */
2182
2183 if (ifa->address.sa.sa_family != fam) {
2184 /* forget mis-matched family */
2185 return (NULL);
2186 }
2187 if ((dest_is_priv == 0) && (dest_is_loop == 0)) {
2188 dest_is_global = 1;
2189 }
2190 /* Ok the address may be ok */
2191 if (fam == AF_INET6) {
2192 /* ok to use deprecated addresses? */
2193 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2194 return (NULL);
2195 }
2196 if (ifa->src_is_priv) {
2197 if (dest_is_loop) {
2198 return (NULL);
2199 }
2200 }
2201 if (ifa->src_is_glob) {
2202
2203 if (dest_is_loop) {
2204 return (NULL);
2205 }
2206 }
2207 }
2208 /*
2209 * Now that we know what is what, implement or table this could in
2210 * theory be done slicker (it used to be), but this is
2211 * straightforward and easier to validate :-)
2212 */
2213 if ((ifa->src_is_loop) && (dest_is_priv)) {
2214 return (NULL);
2215 }
2216 if ((ifa->src_is_glob) && (dest_is_priv)) {
2217 return (NULL);
2218 }
2219 if ((ifa->src_is_loop) && (dest_is_global)) {
2220 return (NULL);
2221 }
2222 if ((ifa->src_is_priv) && (dest_is_global)) {
2223 return (NULL);
2224 }
2225 /* its a prefered address */
2226 return (ifa);
2227}
2228
2229static struct sctp_ifa *
2230sctp_is_ifa_addr_acceptable(struct sctp_ifa *ifa,
2231 uint8_t dest_is_loop,
2232 uint8_t dest_is_priv,
2233 sa_family_t fam)
2234{
2235 uint8_t dest_is_global = 0;
2236
2237
2238 /*
2239 * Here we determine if its a acceptable address. A acceptable
2240 * address means it is the same scope or higher scope but we can
2241 * allow for NAT which means its ok to have a global dest and a
2242 * private src.
2243 *
2244 * L = loopback, P = private, G = global
2245 * ----------------------------------------- src | dest | result
2246 * ----------------------------------------- L | L | yes
2247 * ----------------------------------------- P | L |
2248 * yes-v4 no-v6 ----------------------------------------- G |
2249 * L | yes ----------------------------------------- L |
2250 * P | no ----------------------------------------- P | P
2251 * | yes ----------------------------------------- G | P
2252 * | yes - May not work -----------------------------------------
2253 * L | G | no ----------------------------------------- P
2254 * | G | yes - May not work
2255 * ----------------------------------------- G | G | yes
2256 * -----------------------------------------
2257 */
2258
2259 if (ifa->address.sa.sa_family != fam) {
2260 /* forget non matching family */
2261 return (NULL);
2262 }
2263 /* Ok the address may be ok */
2264 if ((dest_is_loop == 0) && (dest_is_priv == 0)) {
2265 dest_is_global = 1;
2266 }
2267 if (fam == AF_INET6) {
2268 /* ok to use deprecated addresses? */
2269 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
2270 return (NULL);
2271 }
2272 if (ifa->src_is_priv) {
2273 /* Special case, linklocal to loop */
2274 if (dest_is_loop)
2275 return (NULL);
2276 }
2277 }
2278 /*
2279 * Now that we know what is what, implement or table this could in
2280 * theory be done slicker (it used to be), but this is
2281 * straightforward and easier to validate :-)
2282 */
2283
2284 if ((ifa->src_is_loop == 0) && (dest_is_priv)) {
2285 return (NULL);
2286 }
2287 if ((ifa->src_is_loop == 0) && (dest_is_global)) {
2288 return (NULL);
2289 }
2290 /* its an acceptable address */
2291 return (ifa);
2292}
2293
2294int
2295sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
2296{
2297 struct sctp_laddr *laddr;
2298
2299 if (stcb == NULL) {
2300 /* There are no restrictions, no TCB :-) */
2301 return (0);
2302 }
2303 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
2304 if (laddr->ifa == NULL) {
2305#ifdef SCTP_DEBUG
2306 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2307 printf("Help I have fallen and I can't get up!\n");
2308 }
2309#endif
2310 continue;
2311 }
2312 if (laddr->ifa == ifa) {
2313 /* Yes it is on the list */
2314 return (1);
2315 }
2316 }
2317 return (0);
2318}
2319
2320
2321int
2322sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
2323{
2324 struct sctp_laddr *laddr;
2325
2326 if (ifa == NULL)
2327 return (0);
2328 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2329 if (laddr->ifa == NULL) {
2330#ifdef SCTP_DEBUG
2331 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2332 printf("Help I have fallen and I can't get up!\n");
2333 }
2334#endif
2335 continue;
2336 }
2337 if ((laddr->ifa == ifa) && laddr->action == 0)
2338 /* same pointer */
2339 return (1);
2340 }
2341 return (0);
2342}
2343
2344
2345
/*
 * Source address selection for an endpoint bound to specific addresses
 * (no association involved).  Strategy:
 *   1) If the route's emit interface is known, return a "prefered"
 *      address from it that is also bound to the endpoint.
 *   2) Otherwise round-robin over the endpoint's bound-address list
 *      (cursor: inp->next_addr_touse), first looking for a prefered
 *      address, then for a merely acceptable one.
 * On success the returned ifa's refcount is bumped; NULL means no
 * usable source address exists for this destination.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_inp(struct sctp_inpcb *inp,
    struct route *ro,
    uint32_t vrf_id,
    int non_asoc_addr_ok,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	int resettotop = 0;	/* set once the scan has wrapped to the list head */
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *pass;
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index);
	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want such an address. Note that we first looked for a prefered
	 * address.
	 */
	if (sctp_ifn) {
		/* is a prefered one on the interface we route out? */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			/* deferred addresses are only usable with the override */
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			pass = sctp_is_ifa_addr_prefered(sctp_ifa, dest_is_loop, dest_is_priv, fam);
			if (pass == NULL)
				continue;
			if (sctp_is_addr_in_ep(inp, pass)) {
				atomic_add_int(&pass->refcount, 1);
				return (pass);
			}
		}
	}
	/*
	 * ok, now we now need to find one on the list of the addresses. We
	 * can't get one on the emitting interface so lets find first a
	 * prefered one. If not that a acceptable one otherwise... we return
	 * NULL.
	 */
	starting_point = inp->next_addr_touse;
once_again:
	if (inp->next_addr_touse == NULL) {
		/* cursor unset/exhausted: wrap to the head of the bound list */
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	for (laddr = inp->next_addr_touse; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		pass = sctp_is_ifa_addr_prefered(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (pass == NULL)
			continue;
		atomic_add_int(&pass->refcount, 1);
		return (pass);
	}
	if (resettotop == 0) {
		/* only scanned the tail so far; restart once from the head */
		inp->next_addr_touse = NULL;
		goto once_again;
	}
	/* no prefered address found; redo the rotation accepting "acceptable" */
	inp->next_addr_touse = starting_point;
	resettotop = 0;
once_again_too:
	if (inp->next_addr_touse == NULL) {
		inp->next_addr_touse = LIST_FIRST(&inp->sctp_addr_list);
		resettotop = 1;
	}
	/* ok, what about an acceptable address in the inp */
	for (laddr = inp->next_addr_touse; laddr; laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		pass = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (pass == NULL)
			continue;
		atomic_add_int(&pass->refcount, 1);
		return (pass);
	}
	if (resettotop == 0) {
		inp->next_addr_touse = NULL;
		goto once_again_too;
	}
	/*
	 * no address bound can be a source for the destination we are in
	 * trouble
	 */
	return (NULL);
}
2445
2446
2447
/*
 * Source address selection for an association on a bound-specific
 * endpoint.  First try the route's emit interface (a "prefered", then
 * an "acceptable", address that is bound to the endpoint).  Failing
 * that, round-robin the endpoint's bound-address list using
 * stcb->asoc.last_used_address as the cursor, again prefered first and
 * acceptable second.  Unless non_asoc_addr_ok is set, addresses on the
 * association's restricted list are skipped.  The returned ifa has its
 * refcount bumped; NULL means no usable source.
 */
static struct sctp_ifa *
sctp_choose_boundspecific_stcb(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct route *ro,
    uint32_t vrf_id,
    uint8_t dest_is_priv,
    uint8_t dest_is_loop,
    int non_asoc_addr_ok,
    sa_family_t fam)
{
	struct sctp_laddr *laddr, *starting_point;
	void *ifn;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa, *pass;
	uint8_t start_at_beginning = 0;	/* set once the scan wrapped to the head */
	struct sctp_vrf *vrf;
	uint32_t ifn_index;

	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want that one.
	 */
	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL)
		return (NULL);

	ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
	ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
	sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index);

	/*
	 * first question, is the ifn we will emit on in our list, if so, we
	 * want that one.. First we look for a prefered. Second we go for an
	 * acceptable.
	 */
	if (sctp_ifn) {
		/* first try for an prefered address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			/* deferred addresses are only usable with the override */
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				pass = sctp_is_ifa_addr_prefered(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (pass == NULL)
					continue;
				if ((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, pass))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&pass->refcount, 1);
				return (pass);
			}
		}
		/* next try for an acceptable address on the ep */
		LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
			if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
				continue;
			if (sctp_is_addr_in_ep(inp, sctp_ifa)) {
				pass = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
				if (pass == NULL)
					continue;
				if ((non_asoc_addr_ok == 0) &&
				    (sctp_is_addr_restricted(stcb, pass))) {
					/* on the no-no list */
					continue;
				}
				atomic_add_int(&pass->refcount, 1);
				return (pass);
			}
		}

	}
	/*
	 * if we can't find one like that then we must look at all addresses
	 * bound to pick one at first prefereable then secondly acceptable.
	 */
	starting_point = stcb->asoc.last_used_address;
sctp_from_the_top:
	if (stcb->asoc.last_used_address == NULL) {
		/* cursor exhausted: wrap to the head of the bound list */
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		pass = sctp_is_ifa_addr_prefered(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (pass == NULL)
			continue;
		if ((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, pass))) {
			/* on the no-no list */
			continue;
		}
		/* remember the cursor so the next call rotates onward */
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&pass->refcount, 1);
		return (pass);

	}
	if (start_at_beginning == 0) {
		/* only scanned the tail; restart once from the head */
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top;
	}
	/* now try for any higher scope than the destination */
	stcb->asoc.last_used_address = starting_point;
	start_at_beginning = 0;
sctp_from_the_top2:
	if (stcb->asoc.last_used_address == NULL) {
		start_at_beginning = 1;
		stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
	}
	/* search beginning with the last used address */
	for (laddr = stcb->asoc.last_used_address; laddr;
	    laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
		if (laddr->ifa == NULL) {
			/* address has been removed */
			continue;
		}
		pass = sctp_is_ifa_addr_acceptable(laddr->ifa, dest_is_loop, dest_is_priv, fam);
		if (pass == NULL)
			continue;
		if ((non_asoc_addr_ok == 0) &&
		    (sctp_is_addr_restricted(stcb, pass))) {
			/* on the no-no list */
			continue;
		}
		stcb->asoc.last_used_address = laddr;
		atomic_add_int(&pass->refcount, 1);
		return (pass);
	}
	if (start_at_beginning == 0) {
		stcb->asoc.last_used_address = NULL;
		goto sctp_from_the_top2;
	}
	return (NULL);
}
2588
2589static struct sctp_ifa *
2590sctp_select_nth_prefered_addr_from_ifn_boundall(struct sctp_ifn *ifn,
2591 struct sctp_tcb *stcb,
2592 int non_asoc_addr_ok,
2593 uint8_t dest_is_loop,
2594 uint8_t dest_is_priv,
2595 int addr_wanted,
2596 sa_family_t fam)
2597{
2598 struct sctp_ifa *ifa, *pass;
2599 int num_eligible_addr = 0;
2600
2601 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2602 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2603 continue;
2604 pass = sctp_is_ifa_addr_prefered(ifa, dest_is_loop, dest_is_priv, fam);
2605 if (pass == NULL)
2606 continue;
2607 if (stcb) {
2608 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) {
2609 /*
2610 * It is restricted for some reason..
2611 * probably not yet added.
2612 */
2613 continue;
2614 }
2615 }
2616 if (num_eligible_addr >= addr_wanted) {
2617 return (pass);
2618 }
2619 num_eligible_addr++;
2620 }
2621 return (NULL);
2622}
2623
2624
1867static int
2625static int
2626sctp_count_num_prefered_boundall(struct sctp_ifn *ifn,
2627 struct sctp_tcb *stcb,
2628 int non_asoc_addr_ok,
2629 uint8_t dest_is_loop,
2630 uint8_t dest_is_priv,
2631 sa_family_t fam)
2632{
2633 struct sctp_ifa *ifa, *pass;
2634 int num_eligible_addr = 0;
2635
2636 LIST_FOREACH(ifa, &ifn->ifalist, next_ifa) {
2637 if ((ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0)) {
2638 continue;
2639 }
2640 pass = sctp_is_ifa_addr_prefered(ifa, dest_is_loop, dest_is_priv, fam);
2641 if (pass == NULL) {
2642 continue;
2643 }
2644 if (stcb) {
2645 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) {
2646 /*
2647 * It is restricted for some reason..
2648 * probably not yet added.
2649 */
2650 continue;
2651 }
2652 }
2653 num_eligible_addr++;
2654 }
2655 return (num_eligible_addr);
2656}
2657
2658static struct sctp_ifa *
2659sctp_choose_boundall(struct sctp_inpcb *inp,
2660 struct sctp_tcb *stcb,
2661 struct sctp_nets *net,
2662 struct route *ro,
2663 uint32_t vrf_id,
2664 uint8_t dest_is_priv,
2665 uint8_t dest_is_loop,
2666 int non_asoc_addr_ok,
2667 sa_family_t fam)
2668{
2669 int cur_addr_num = 0, num_prefered = 0;
2670 void *ifn;
2671 struct sctp_ifn *sctp_ifn, *looked_at = NULL, *emit_ifn;
2672 struct sctp_ifa *sctp_ifa, *pass;
2673 uint32_t ifn_index;
2674 struct sctp_vrf *vrf;
2675
2676 /*
2677 * For boundall we can use any address in the association. If
2678 * non_asoc_addr_ok is set we can use any address (at least in
2679 * theory). So we look for prefered addresses first. If we find one,
2680 * we use it. Otherwise we next try to get an address on the
2681 * interface, which we should be able to do (unless non_asoc_addr_ok
2682 * is false and we are routed out that way). In these cases where we
2683 * can't use the address of the interface we go through all the
2684 * ifn's looking for an address we can use and fill that in. Punting
2685 * means we send back address 0, which will probably cause problems
2686 * actually since then IP will fill in the address of the route ifn,
2687 * which means we probably already rejected it.. i.e. here comes an
2688 * abort :-<.
2689 */
2690 vrf = sctp_find_vrf(vrf_id);
2691 if (vrf == NULL)
2692 return (NULL);
2693
2694 ifn = SCTP_GET_IFN_VOID_FROM_ROUTE(ro);
2695 ifn_index = SCTP_GET_IF_INDEX_FROM_ROUTE(ro);
2696
2697 emit_ifn = looked_at = sctp_ifn = sctp_find_ifn(vrf, ifn, ifn_index);
2698 if (sctp_ifn == NULL) {
2699 /* ?? We don't have this guy ?? */
2700 goto bound_all_plan_b;
2701 }
2702 if (net) {
2703 cur_addr_num = net->indx_of_eligible_next_to_use;
2704 }
2705 num_prefered = sctp_count_num_prefered_boundall(sctp_ifn,
2706 stcb,
2707 non_asoc_addr_ok,
2708 dest_is_loop,
2709 dest_is_priv, fam);
2710#ifdef SCTP_DEBUG
2711 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2712 printf("Found %d prefered source addresses\n", num_prefered);
2713 }
2714#endif
2715 if (num_prefered == 0) {
2716 /*
2717 * no eligible addresses, we must use some other interface
2718 * address if we can find one.
2719 */
2720 goto bound_all_plan_b;
2721 }
2722 /*
2723 * Ok we have num_eligible_addr set with how many we can use, this
2724 * may vary from call to call due to addresses being deprecated
2725 * etc..
2726 */
2727 if (cur_addr_num >= num_prefered) {
2728 cur_addr_num = 0;
2729 }
2730 /*
2731 * select the nth address from the list (where cur_addr_num is the
2732 * nth) and 0 is the first one, 1 is the second one etc...
2733 */
2734#ifdef SCTP_DEBUG
2735 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2736 printf("cur_addr_num:%d\n", cur_addr_num);
2737 }
2738#endif
2739 sctp_ifa = sctp_select_nth_prefered_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2740 dest_is_priv, cur_addr_num, fam);
2741
2742 /* if sctp_ifa is NULL something changed??, fall to plan b. */
2743 if (sctp_ifa) {
2744 atomic_add_int(&sctp_ifa->refcount, 1);
2745 if (net) {
2746 /* save off where the next one we will want */
2747 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2748 }
2749 return (sctp_ifa);
2750 }
2751 /*
2752 * plan_b: Look at all interfaces and find a prefered address. If no
2753 * prefered fall through to plan_c.
2754 */
2755bound_all_plan_b:
2756 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2757 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2758 /* wrong base scope */
2759 continue;
2760 }
2761 if ((sctp_ifn == looked_at) && looked_at)
2762 /* already looked at this guy */
2763 continue;
2764 num_prefered = sctp_count_num_prefered_boundall(sctp_ifn, stcb, non_asoc_addr_ok,
2765 dest_is_loop, dest_is_priv, fam);
2766#ifdef SCTP_DEBUG
2767 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2768 printf("Found ifn:%p %d prefered source addresses\n", ifn, num_prefered);
2769 }
2770#endif
2771 if (num_prefered == 0) {
2772 /*
2773 * None on this interface.
2774 */
2775 continue;
2776 }
2777#ifdef SCTP_DEBUG
2778 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
2779 printf("num prefered:%d on interface:%p cur_addr_num:%d\n",
2780 num_prefered,
2781 sctp_ifn,
2782 cur_addr_num);
2783 }
2784#endif
2785
2786 /*
2787 * Ok we have num_eligible_addr set with how many we can
2788 * use, this may vary from call to call due to addresses
2789 * being deprecated etc..
2790 */
2791 if (cur_addr_num >= num_prefered) {
2792 cur_addr_num = 0;
2793 }
2794 pass = sctp_select_nth_prefered_addr_from_ifn_boundall(sctp_ifn, stcb, non_asoc_addr_ok, dest_is_loop,
2795 dest_is_priv, cur_addr_num, fam);
2796 if (pass == NULL)
2797 continue;
2798 if (net) {
2799 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
2800#ifdef SCTP_DEBUG
2801 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
2802 printf("we selected %d\n", cur_addr_num);
2803 printf("Source:");
2804 sctp_print_address(&pass->address.sa);
2805 printf("Dest:");
2806 sctp_print_address(&net->ro._l_addr.sa);
2807 }
2808#endif
2809 }
2810 atomic_add_int(&pass->refcount, 1);
2811 return (pass);
2812
2813 }
2814
2815 /*
2816 * plan_c: See if we have an acceptable address on the emit
2817 * interface
2818 */
2819#ifdef SCTP_DEBUG
2820 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
2821 if (net) {
2822 printf("Plan C no prefered for Dest:");
2823 sctp_print_address(&net->ro._l_addr.sa);
2824 }
2825 }
2826#endif
2827
2828 LIST_FOREACH(sctp_ifa, &emit_ifn->ifalist, next_ifa) {
2829 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2830 continue;
2831 pass = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2832 if (pass == NULL)
2833 continue;
2834 if (stcb) {
2835 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) {
2836 /*
2837 * It is restricted for some reason..
2838 * probably not yet added.
2839 */
2840 continue;
2841 }
2842 }
2843 atomic_add_int(&pass->refcount, 1);
2844 return (pass);
2845 }
2846
2847 /*
2848 * plan_d: We are in trouble. No prefered address on the emit
2849 * interface. And not even a perfered address on all interfaces. Go
2850 * out and see if we can find an acceptable address somewhere
2851 * amongst all interfaces.
2852 */
2853 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
2854 if (dest_is_loop == 0 && SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
2855 /* wrong base scope */
2856 continue;
2857 }
2858 if ((sctp_ifn == looked_at) && looked_at)
2859 /* already looked at this guy */
2860 continue;
2861
2862 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
2863 if ((sctp_ifa->localifa_flags & SCTP_ADDR_DEFER_USE) && (non_asoc_addr_ok == 0))
2864 continue;
2865 pass = sctp_is_ifa_addr_acceptable(sctp_ifa, dest_is_loop, dest_is_priv, fam);
2866 if (pass == NULL)
2867 continue;
2868 if (stcb) {
2869 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, pass)) {
2870 /*
2871 * It is restricted for some
2872 * reason.. probably not yet added.
2873 */
2874 continue;
2875 }
2876 }
2877 atomic_add_int(&pass->refcount, 1);
2878 return (pass);
2879 }
2880 }
2881 /*
2882 * Ok we can find NO address to source from that is not on our
2883 * negative list and non_asoc_address is NOT ok, or its on our
2884 * negative list. We cant source to it :-(
2885 */
2886 return (NULL);
2887}
2888
2889
2890
/*
 * Top-level source address selection; stcb ("tcb") may be NULL.
 * Ensures a cached route exists (allocating one if needed), classifies
 * the destination's scope (loopback / private / global) and then
 * dispatches to the bound-all or bound-specific chooser.  Returns a
 * referenced sctp_ifa, or NULL when there is no route or no usable
 * source address.
 */
struct sctp_ifa *
sctp_source_address_selection(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct route *ro,
    struct sctp_nets *net,
    int non_asoc_addr_ok, uint32_t vrf_id)
{
	/* both views alias ro->ro_dst; which is valid depends on the family */
	struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
	struct sockaddr_in6 *to6 = (struct sockaddr_in6 *)&ro->ro_dst;
	struct sctp_ifa *answer;
	uint8_t dest_is_priv, dest_is_loop;
	int did_rtalloc = 0;	/* NOTE(review): set but never read below */
	sa_family_t fam;

	/*
	 * Rules: - Find the route if needed, cache if I can. - Look at
	 * interface address in route, Is it in the bound list. If so we
	 * have the best source. - If not we must rotate amongst the
	 * addresses.
	 *
	 * Cavets and issues
	 *
	 * Do we need to pay attention to scope. We can have a private address
	 * or a global address we are sourcing or sending to. So if we draw
	 * it out:
	 *
	 * For V4:
	 * ------------------------------------------
	 *     source    *    dest    * result
	 * ------------------------------------------
	 * <a> Private   *   Global   * NAT
	 * ------------------------------------------
	 * <b> Private   *   Private  * No problem
	 * ------------------------------------------
	 * <c> Global    *   Private  * Huh, How will this work?
	 * ------------------------------------------
	 * <d> Global    *   Global   * No Problem
	 * ------------------------------------------
	 *
	 * For V6:
	 * ------------------------------------------
	 *     source    *    dest    * result
	 * ------------------------------------------
	 * <a> Linklocal *   Global   *
	 * ------------------------------------------
	 * <b> Linklocal *   Linklocal* No problem
	 * ------------------------------------------
	 * <c> Global    *   Linklocal* Huh, How will this work?
	 * ------------------------------------------
	 * <d> Global    *   Global   * No Problem
	 * ------------------------------------------
	 *
	 * And then we add to that what happens if there are multiple addresses
	 * assigned to an interface. Remember the ifa on a ifn is a linked
	 * list of addresses. So one interface can have more than one IP
	 * address. What happens if we have both a private and a global
	 * address? Do we then use context of destination to sort out which
	 * one is best? And what about NAT's sending P->G may get you a NAT
	 * translation, or should you select the G thats on the interface in
	 * preference.
	 *
	 * Decisions:
	 *
	 * - count the number of addresses on the interface.
	 * - if its one, no problem except case <c>. For <a> we will assume
	 *   a NAT out there.
	 * - if there are more than one, then we need to worry about scope P
	 *   or G. We should prefer G -> G and P -> P if possible. Then as a
	 *   secondary fall back to mixed types G->P being a last ditch one.
	 * - The above all works for bound all, but bound specific we need
	 *   to use the same concept but instead only consider the bound
	 *   addresses. If the bound set is NOT assigned to the interface
	 *   then we must use rotation amongst the bound addresses..
	 */
	if (ro->ro_rt == NULL) {
		/*
		 * Need a route to cache.
		 *
		 */
		rtalloc_ign(ro, 0UL);
		did_rtalloc = 1;
	}
	if (ro->ro_rt == NULL) {
		/* no route at all to the destination */
		return (NULL);
	}
	fam = to->sin_family;
	dest_is_priv = dest_is_loop = 0;
	/* Setup our scopes for the destination */
	if (fam == AF_INET) {
		/* Scope based on outbound address */
		if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
			dest_is_priv = 1;
		} else if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		}
	} else if (fam == AF_INET6) {
		/* Scope based on outbound address */
		if (IN6_IS_ADDR_LOOPBACK(&to6->sin6_addr)) {
			/*
			 * If the route goes to the loopback address OR the
			 * address is a loopback address, we are loopback
			 * scope. But we don't use dest_is_priv (link local
			 * addresses).
			 */
			dest_is_loop = 1;
			if (net != NULL) {
				/* mark it as local */
				net->addr_is_local = 1;
			}
		} else if (IN6_IS_ADDR_LINKLOCAL(&to6->sin6_addr)) {
			dest_is_priv = 1;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * When bound to all if the address list is set it is a
		 * negative list. Addresses being added by asconf.
		 */
		answer = sctp_choose_boundall(inp, stcb, net, ro, vrf_id,
		    dest_is_priv,
		    dest_is_loop,
		    non_asoc_addr_ok,
		    fam);
		return (answer);
	}
	/*
	 * Three possiblities here:
	 *
	 * a) stcb is NULL, which means we operate only from the list of
	 *    addresses (ifa's) bound to the endpoint and we care not about
	 *    the list.
	 * b) stcb is NOT-NULL, which means we have an assoc structure and
	 *    auto-asconf is on. This means that the list of addresses is a
	 *    NOT list. We use the list from the inp, but any listed address
	 *    in our list is NOT yet added. However if the non_asoc_addr_ok
	 *    is set we CAN use an address NOT available (i.e. being added).
	 *    Its a negative list.
	 * c) stcb is NOT-NULL, which means we have an assoc structure and
	 *    auto-asconf is off. This means that the list of addresses is
	 *    the ONLY addresses I can use.. its positive.
	 *
	 * Note we collapse b & c into the same function just like in the v6
	 * address selection.
	 */
	if (stcb) {
		answer = sctp_choose_boundspecific_stcb(inp, stcb, net, ro, vrf_id,
		    dest_is_priv, dest_is_loop, non_asoc_addr_ok, fam);

	} else {
		answer = sctp_choose_boundspecific_inp(inp, ro, vrf_id, non_asoc_addr_ok, dest_is_priv, dest_is_loop, fam);

	}
	return (answer);
}
3041
/*
 * Scan the control mbuf chain for a cmsg at level IPPROTO_SCTP with
 * type c_type, and copy the first cpsize bytes of its payload into
 * 'data'.  Returns 1 when found and copied, 0 otherwise (not present,
 * truncated header, or payload smaller than cpsize).
 */
static int
sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
{
	struct cmsghdr cmh;
	int tlen, at;

	tlen = SCTP_BUF_LEN(control);
	at = 0;
	/*
	 * Independent of how many mbufs, find the c_type inside the control
	 * structure and copy out the data.
	 */
	while (at < tlen) {
		if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
			/* not enough room for one more we are done. */
			return (0);
		}
		m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
		if ((cmh.cmsg_len + at) > tlen) {
			/*
			 * this is real messed up since there is not enough
			 * data here to cover the cmsg header. We are done.
			 */
			return (0);
		}
		if ((cmh.cmsg_level == IPPROTO_SCTP) &&
		    (c_type == cmh.cmsg_type)) {
			/* found the one we want, copy it out */
			at += CMSG_ALIGN(sizeof(struct cmsghdr));
			if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
				/*
				 * space of cmsg_len after header not big
				 * enough
				 */
				return (0);
			}
			m_copydata(control, at, cpsize, data);
			return (1);
		} else {
			/* skip ahead to the next aligned cmsg */
			at += CMSG_ALIGN(cmh.cmsg_len);
			if (cmh.cmsg_len == 0) {
				/* a zero-length cmsg would loop forever; bail */
				break;
			}
		}
	}
	/* not found */
	return (0);
}
1915
1916
3043sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
3044{
3045 struct cmsghdr cmh;
3046 int tlen, at;
3047
3048 tlen = SCTP_BUF_LEN(control);
3049 at = 0;
3050 /*
3051 * Independent of how many mbufs, find the c_type inside the control
3052 * structure and copy out the data.
3053 */
3054 while (at < tlen) {
3055 if ((tlen - at) < (int)CMSG_ALIGN(sizeof(cmh))) {
3056 /* not enough room for one more we are done. */
3057 return (0);
3058 }
3059 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
3060 if ((cmh.cmsg_len + at) > tlen) {
3061 /*
3062 * this is real messed up since there is not enough
3063 * data here to cover the cmsg header. We are done.
3064 */
3065 return (0);
3066 }
3067 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
3068 (c_type == cmh.cmsg_type)) {
3069 /* found the one we want, copy it out */
3070 at += CMSG_ALIGN(sizeof(struct cmsghdr));
3071 if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
3072 /*
3073 * space of cmsg_len after header not big
3074 * enough
3075 */
3076 return (0);
3077 }
3078 m_copydata(control, at, cpsize, data);
3079 return (1);
3080 } else {
3081 at += CMSG_ALIGN(cmh.cmsg_len);
3082 if (cmh.cmsg_len == 0) {
3083 break;
3084 }
3085 }
3086 }
3087 /* not found */
3088 return (0);
3089}
3090
3091
1917extern int sctp_mbuf_threshold_count;
1918
1919
1920__inline struct mbuf *
3092struct mbuf *
1921sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
1922 int how, int allonebuf, int type)
1923{
1924 struct mbuf *m = NULL;
1925 int aloc_size;
1926 int index = 0;
1927 int mbuf_threshold;
1928
1929 if (want_header) {
1930 MGETHDR(m, how, type);
1931 } else {
1932 MGET(m, how, type);
1933 }
1934 if (m == NULL) {
1935 return (NULL);
1936 }
1937 if (allonebuf == 0)
1938 mbuf_threshold = sctp_mbuf_threshold_count;
1939 else
1940 mbuf_threshold = 1;
1941
1942
1943 if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
1944try_again:
1945 index = 4;
1946 if (space_needed <= MCLBYTES) {
1947 aloc_size = MCLBYTES;
1948 } else if (space_needed <= MJUMPAGESIZE) {
1949 aloc_size = MJUMPAGESIZE;
1950 index = 5;
1951 } else if (space_needed <= MJUM9BYTES) {
1952 aloc_size = MJUM9BYTES;
1953 index = 6;
1954 } else {
1955 aloc_size = MJUM16BYTES;
1956 index = 7;
1957 }
1958 m_cljget(m, how, aloc_size);
1959 if (m == NULL) {
1960 return (NULL);
1961 }
1962 if (SCTP_BUF_IS_EXTENDED(m) == 0) {
1963 if ((aloc_size != MCLBYTES) &&
1964 (allonebuf == 0)) {
1965 aloc_size -= 10;
1966 goto try_again;
1967 }
1968 sctp_m_freem(m);
1969 return (NULL);
1970 }
1971 }
1972 SCTP_BUF_LEN(m) = 0;
1973 SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
1974#ifdef SCTP_MBUF_LOGGING
1975 if (SCTP_BUF_IS_EXTENDED(m)) {
1976 sctp_log_mb(m, SCTP_MBUF_IALLOC);
1977 }
1978#endif
1979 return (m);
1980}
1981
1982
/*
 * Build the STATE-COOKIE parameter mbuf chain for an INIT-ACK.
 * Layout: paramhdr + state cookie (a copy of stc_in), followed by a
 * copy of the received INIT (from init_offset), then a copy of our
 * INIT-ACK (from initack_offset), terminated by an HMAC signature
 * computed over everything after the paramhdr.  Returns the chain, or
 * NULL on allocation failure (partially built chains are freed).
 */
static struct mbuf *
sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
    struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
{
	struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
	struct sctp_state_cookie *stc;
	struct sctp_paramhdr *ph;
	uint8_t *signature;
	int sig_offset;
	uint16_t cookie_sz;

	mret = NULL;


	mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr)), 0, M_DONTWAIT, 1, MT_DATA);
	if (mret == NULL) {
		return (NULL);
	}
	copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
	if (copy_init == NULL) {
		sctp_m_freem(mret);
		return (NULL);
	}
	copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
	    M_DONTWAIT);
	if (copy_initack == NULL) {
		sctp_m_freem(mret);
		sctp_m_freem(copy_init);
		return (NULL);
	}
	/* easy side we just drop it on the end */
	ph = mtod(mret, struct sctp_paramhdr *);
	SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
	    sizeof(struct sctp_paramhdr);
	stc = (struct sctp_state_cookie *)((caddr_t)ph +
	    sizeof(struct sctp_paramhdr));
	ph->param_type = htons(SCTP_STATE_COOKIE);
	ph->param_length = 0;	/* fill in at the end */
	/* Fill in the stc cookie data */
	*stc = *stc_in;

	/* tack the INIT and then the INIT-ACK onto the chain */
	cookie_sz = 0;
	m_at = mret;
	/* walk to the tail, summing lengths, and append the INIT copy */
	for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_init;
			break;
		}
	}

	/* then append the INIT-ACK copy the same way */
	for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			SCTP_BUF_NEXT(m_at) = copy_initack;
			break;
		}
	}

	/* finish the size sum, leaving m_at at the tail of the chain */
	for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		cookie_sz += SCTP_BUF_LEN(m_at);
		if (SCTP_BUF_NEXT(m_at) == NULL) {
			break;
		}
	}
	sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
	if (sig == NULL) {
		/* no space, so free the entire chain */
		sctp_m_freem(mret);
		return (NULL);
	}
	SCTP_BUF_LEN(sig) = 0;
	SCTP_BUF_NEXT(m_at) = sig;
	sig_offset = 0;
	signature = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
	/* Time to sign the cookie, keyed with the ep's current secret */
	sctp_hmac_m(SCTP_HMAC,
	    (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
	    SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr),
	    (uint8_t *) signature);
	SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
	cookie_sz += SCTP_SIGNATURE_SIZE;

	ph->param_length = htons(cookie_sz);
	return (mret);
}
2071
2072
/*
 * Pick the ECT codepoint for an outgoing chunk.  When ECN nonce is
 * enabled and the peer supports it, a pseudo-random bit stream
 * (asoc.hb_random_values, consumed one bit at a time via
 * hb_random_idx / hb_ect_randombit) decides between ECT0 and ECT1 and
 * records the nonce on the chunk; otherwise ECT0 is always used (or
 * no ECT at all when ECN is disabled).
 */
static __inline uint8_t
sctp_get_ect(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
	uint8_t this_random;

	/* Huh? */
	if (sctp_ecn_enable == 0)
		return (0);

	if (sctp_ecn_nonce == 0)
		/* no nonce, always return ECT0 */
		return (SCTP_ECT0_BIT);

	if (stcb->asoc.peer_supports_ecn_nonce == 0) {
		/* Peer does NOT support it, so we send a ECT0 only */
		return (SCTP_ECT0_BIT);
	}
	if (chk == NULL)
		return (SCTP_ECT0_BIT);

	/* pool of random bits exhausted? refill it with a fresh value */
	if (((stcb->asoc.hb_random_idx == 3) &&
	    (stcb->asoc.hb_ect_randombit > 7)) ||
	    (stcb->asoc.hb_random_idx > 3)) {
		uint32_t rndval;

		rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		memcpy(stcb->asoc.hb_random_values, &rndval,
		    sizeof(stcb->asoc.hb_random_values));
		this_random = stcb->asoc.hb_random_values[0];
		stcb->asoc.hb_random_idx = 0;
		stcb->asoc.hb_ect_randombit = 0;
	} else {
		if (stcb->asoc.hb_ect_randombit > 7) {
			/* current byte used up, move to the next one */
			stcb->asoc.hb_ect_randombit = 0;
			stcb->asoc.hb_random_idx++;
		}
		this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
	}
	/* consume one bit of randomness to choose ECT0 vs ECT1 */
	if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
		if (chk != NULL)
			/* ECN Nonce stuff */
			chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT1_BIT);
	} else {
		stcb->asoc.hb_ect_randombit++;
		return (SCTP_ECT0_BIT);
	}
}
2123
/*
 * Allocate an mbuf (optionally with a packet header) able to hold
 * space_needed bytes.  If the request exceeds what a small chain of plain
 * mbufs would hold (threshold from allonebuf / sctp_mbuf_threshold_count),
 * attach a cluster of the smallest standard size (2k / page / 9k / 16k)
 * that fits.  Returns NULL on any allocation failure; on success the mbuf
 * has length 0 and cleared next/nextpkt links.
 */
sctp_get_mbuf_for_msg(unsigned int space_needed, int want_header,
    int how, int allonebuf, int type)
{
	struct mbuf *m = NULL;
	int aloc_size;
	/*
	 * NOTE(review): index is assigned below but never read — looks like
	 * a leftover from a zone-index scheme; confirm before removing.
	 */
	int index = 0;
	int mbuf_threshold;

	/* Grab a plain mbuf, with or without a pkthdr. */
	if (want_header) {
		MGETHDR(m, how, type);
	} else {
		MGET(m, how, type);
	}
	if (m == NULL) {
		return (NULL);
	}
	/* allonebuf forces everything into one buffer (threshold of 1). */
	if (allonebuf == 0)
		mbuf_threshold = sctp_mbuf_threshold_count;
	else
		mbuf_threshold = 1;


	if (space_needed > (((mbuf_threshold - 1) * MLEN) + MHLEN)) {
try_again:
		/* Pick the smallest standard cluster size that fits. */
		index = 4;
		if (space_needed <= MCLBYTES) {
			aloc_size = MCLBYTES;
		} else if (space_needed <= MJUMPAGESIZE) {
			aloc_size = MJUMPAGESIZE;
			index = 5;
		} else if (space_needed <= MJUM9BYTES) {
			aloc_size = MJUM9BYTES;
			index = 6;
		} else {
			aloc_size = MJUM16BYTES;
			index = 7;
		}
		m_cljget(m, how, aloc_size);
		/*
		 * NOTE(review): m_cljget() does not NULL out m on failure,
		 * so this check appears to be dead code — verify.
		 */
		if (m == NULL) {
			return (NULL);
		}
		if (SCTP_BUF_IS_EXTENDED(m) == 0) {
			/*
			 * Cluster attach failed.  Multi-mbuf callers retry;
			 * note that try_again recomputes aloc_size from
			 * space_needed, so the decrement below has no
			 * lasting effect (possible tight retry loop) —
			 * TODO confirm intent.
			 */
			if ((aloc_size != MCLBYTES) &&
			    (allonebuf == 0)) {
				aloc_size -= 10;
				goto try_again;
			}
			sctp_m_freem(m);
			return (NULL);
		}
	}
	SCTP_BUF_LEN(m) = 0;
	SCTP_BUF_NEXT(m) = SCTP_BUF_NEXT_PKT(m) = NULL;
#ifdef SCTP_MBUF_LOGGING
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_log_mb(m, SCTP_MBUF_IALLOC);
	}
#endif
	return (m);
}
3153
3154
3155static struct mbuf *
3156sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
3157 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
3158{
3159 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
3160 struct sctp_state_cookie *stc;
3161 struct sctp_paramhdr *ph;
3162 uint8_t *signature;
3163 int sig_offset;
3164 uint16_t cookie_sz;
3165
3166 mret = NULL;
3167
3168
3169 mret = sctp_get_mbuf_for_msg((sizeof(struct sctp_state_cookie) +
3170 sizeof(struct sctp_paramhdr)), 0, M_DONTWAIT, 1, MT_DATA);
3171 if (mret == NULL) {
3172 return (NULL);
3173 }
3174 copy_init = SCTP_M_COPYM(init, init_offset, M_COPYALL, M_DONTWAIT);
3175 if (copy_init == NULL) {
3176 sctp_m_freem(mret);
3177 return (NULL);
3178 }
3179 copy_initack = SCTP_M_COPYM(initack, initack_offset, M_COPYALL,
3180 M_DONTWAIT);
3181 if (copy_initack == NULL) {
3182 sctp_m_freem(mret);
3183 sctp_m_freem(copy_init);
3184 return (NULL);
3185 }
3186 /* easy side we just drop it on the end */
3187 ph = mtod(mret, struct sctp_paramhdr *);
3188 SCTP_BUF_LEN(mret) = sizeof(struct sctp_state_cookie) +
3189 sizeof(struct sctp_paramhdr);
3190 stc = (struct sctp_state_cookie *)((caddr_t)ph +
3191 sizeof(struct sctp_paramhdr));
3192 ph->param_type = htons(SCTP_STATE_COOKIE);
3193 ph->param_length = 0; /* fill in at the end */
3194 /* Fill in the stc cookie data */
3195 *stc = *stc_in;
3196
3197 /* tack the INIT and then the INIT-ACK onto the chain */
3198 cookie_sz = 0;
3199 m_at = mret;
3200 for (m_at = mret; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3201 cookie_sz += SCTP_BUF_LEN(m_at);
3202 if (SCTP_BUF_NEXT(m_at) == NULL) {
3203 SCTP_BUF_NEXT(m_at) = copy_init;
3204 break;
3205 }
3206 }
3207
3208 for (m_at = copy_init; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3209 cookie_sz += SCTP_BUF_LEN(m_at);
3210 if (SCTP_BUF_NEXT(m_at) == NULL) {
3211 SCTP_BUF_NEXT(m_at) = copy_initack;
3212 break;
3213 }
3214 }
3215
3216 for (m_at = copy_initack; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
3217 cookie_sz += SCTP_BUF_LEN(m_at);
3218 if (SCTP_BUF_NEXT(m_at) == NULL) {
3219 break;
3220 }
3221 }
3222 sig = sctp_get_mbuf_for_msg(SCTP_SECRET_SIZE, 0, M_DONTWAIT, 1, MT_DATA);
3223 if (sig == NULL) {
3224 /* no space, so free the entire chain */
3225 sctp_m_freem(mret);
3226 return (NULL);
3227 }
3228 SCTP_BUF_LEN(sig) = 0;
3229 SCTP_BUF_NEXT(m_at) = sig;
3230 sig_offset = 0;
3231 signature = (uint8_t *) (mtod(sig, caddr_t)+sig_offset);
3232 /* Time to sign the cookie */
3233 sctp_hmac_m(SCTP_HMAC,
3234 (uint8_t *) inp->sctp_ep.secret_key[(int)(inp->sctp_ep.current_secret_number)],
3235 SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr),
3236 (uint8_t *) signature);
3237 SCTP_BUF_LEN(sig) += SCTP_SIGNATURE_SIZE;
3238 cookie_sz += SCTP_SIGNATURE_SIZE;
3239
3240 ph->param_length = htons(cookie_sz);
3241 return (mret);
3242}
3243
3244
3245static __inline uint8_t
3246sctp_get_ect(struct sctp_tcb *stcb,
3247 struct sctp_tmit_chunk *chk)
3248{
3249 uint8_t this_random;
3250
3251 /* Huh? */
3252 if (sctp_ecn_enable == 0)
3253 return (0);
3254
3255 if (sctp_ecn_nonce == 0)
3256 /* no nonce, always return ECT0 */
3257 return (SCTP_ECT0_BIT);
3258
3259 if (stcb->asoc.peer_supports_ecn_nonce == 0) {
3260 /* Peer does NOT support it, so we send a ECT0 only */
3261 return (SCTP_ECT0_BIT);
3262 }
3263 if (chk == NULL)
3264 return (SCTP_ECT0_BIT);
3265
3266 if (((stcb->asoc.hb_random_idx == 3) &&
3267 (stcb->asoc.hb_ect_randombit > 7)) ||
3268 (stcb->asoc.hb_random_idx > 3)) {
3269 uint32_t rndval;
3270
3271 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
3272 memcpy(stcb->asoc.hb_random_values, &rndval,
3273 sizeof(stcb->asoc.hb_random_values));
3274 this_random = stcb->asoc.hb_random_values[0];
3275 stcb->asoc.hb_random_idx = 0;
3276 stcb->asoc.hb_ect_randombit = 0;
3277 } else {
3278 if (stcb->asoc.hb_ect_randombit > 7) {
3279 stcb->asoc.hb_ect_randombit = 0;
3280 stcb->asoc.hb_random_idx++;
3281 }
3282 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
3283 }
3284 if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
3285 if (chk != NULL)
3286 /* ECN Nonce stuff */
3287 chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
3288 stcb->asoc.hb_ect_randombit++;
3289 return (SCTP_ECT1_BIT);
3290 } else {
3291 stcb->asoc.hb_ect_randombit++;
3292 return (SCTP_ECT0_BIT);
3293 }
3294}
3295
2124extern int sctp_no_csum_on_loopback;
2125
2126static int
2127sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
2128 struct sctp_tcb *stcb, /* may be NULL */
2129 struct sctp_nets *net,
2130 struct sockaddr *to,
2131 struct mbuf *m,
2132 uint32_t auth_offset,
2133 struct sctp_auth_chunk *auth,
2134 int nofragment_flag,
2135 int ecn_ok,
2136 struct sctp_tmit_chunk *chk,
2137 int out_of_asoc_ok)
2138/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
2139{
2140 /*
2141 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
2142 * header WITH a SCTPHDR but no IP header, endpoint inp and sa
2143 * structure. - fill in the HMAC digest of any AUTH chunk in the
2144 * packet - calculate SCTP checksum and fill in - prepend a IP
2145 * address header - if boundall use INADDR_ANY - if boundspecific do
2146 * source address selection - set fragmentation option for ipV4 - On
2147 * return from IP output, check/adjust mtu size - of output
2148 * interface and smallest_mtu size as well.
2149 */
2150 /* Will need ifdefs around this */
2151 struct mbuf *o_pak;
2152
2153 struct sctphdr *sctphdr;
2154 int packet_length;
2155 int o_flgs;
2156 uint32_t csum;
2157 int ret;
2158 unsigned int have_mtu;
3296static int
3297sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
3298 struct sctp_tcb *stcb, /* may be NULL */
3299 struct sctp_nets *net,
3300 struct sockaddr *to,
3301 struct mbuf *m,
3302 uint32_t auth_offset,
3303 struct sctp_auth_chunk *auth,
3304 int nofragment_flag,
3305 int ecn_ok,
3306 struct sctp_tmit_chunk *chk,
3307 int out_of_asoc_ok)
3308/* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
3309{
3310 /*
3311 * Given a mbuf chain (via SCTP_BUF_NEXT()) that holds a packet
3312 * header WITH a SCTPHDR but no IP header, endpoint inp and sa
3313 * structure. - fill in the HMAC digest of any AUTH chunk in the
3314 * packet - calculate SCTP checksum and fill in - prepend a IP
3315 * address header - if boundall use INADDR_ANY - if boundspecific do
3316 * source address selection - set fragmentation option for ipV4 - On
3317 * return from IP output, check/adjust mtu size - of output
3318 * interface and smallest_mtu size as well.
3319 */
3320 /* Will need ifdefs around this */
3321 struct mbuf *o_pak;
3322
3323 struct sctphdr *sctphdr;
3324 int packet_length;
3325 int o_flgs;
3326 uint32_t csum;
3327 int ret;
3328 unsigned int have_mtu;
3329 uint32_t vrf_id;
2159 struct route *ro;
2160
3330 struct route *ro;
3331
3332
2161 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
2162 sctp_m_freem(m);
2163 return (EFAULT);
2164 }
3333 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
3334 sctp_m_freem(m);
3335 return (EFAULT);
3336 }
3337 if (stcb == NULL) {
3338 vrf_id = SCTP_DEFAULT_VRFID;
3339 } else {
3340 vrf_id = stcb->asoc.vrf_id;
3341 }
3342
2165 /* fill in the HMAC digest for any AUTH chunk in the packet */
2166 if ((auth != NULL) && (stcb != NULL)) {
2167 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb);
2168 }
2169 /* Calculate the csum and fill in the length of the packet */
2170 sctphdr = mtod(m, struct sctphdr *);
2171 have_mtu = 0;
2172 if (sctp_no_csum_on_loopback &&
2173 (stcb) &&
2174 (stcb->asoc.loopback_scope)) {
2175 sctphdr->checksum = 0;
2176 /*
2177 * This can probably now be taken out since my audit shows
2178 * no more bad pktlen's coming in. But we will wait a while
2179 * yet.
2180 */
2181 packet_length = sctp_calculate_len(m);
2182 } else {
2183 sctphdr->checksum = 0;
2184 csum = sctp_calculate_sum(m, &packet_length, 0);
2185 sctphdr->checksum = csum;
2186 }
2187
2188 if (to->sa_family == AF_INET) {
3343 /* fill in the HMAC digest for any AUTH chunk in the packet */
3344 if ((auth != NULL) && (stcb != NULL)) {
3345 sctp_fill_hmac_digest_m(m, auth_offset, auth, stcb);
3346 }
3347 /* Calculate the csum and fill in the length of the packet */
3348 sctphdr = mtod(m, struct sctphdr *);
3349 have_mtu = 0;
3350 if (sctp_no_csum_on_loopback &&
3351 (stcb) &&
3352 (stcb->asoc.loopback_scope)) {
3353 sctphdr->checksum = 0;
3354 /*
3355 * This can probably now be taken out since my audit shows
3356 * no more bad pktlen's coming in. But we will wait a while
3357 * yet.
3358 */
3359 packet_length = sctp_calculate_len(m);
3360 } else {
3361 sctphdr->checksum = 0;
3362 csum = sctp_calculate_sum(m, &packet_length, 0);
3363 sctphdr->checksum = csum;
3364 }
3365
3366 if (to->sa_family == AF_INET) {
2189 struct ip *ip;
3367 struct ip *ip = NULL;
2190 struct route iproute;
2191 uint8_t tos_value;
2192
2193 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip));
2194 if (o_pak == NULL) {
2195 /* failed to prepend data, give up */
2196 sctp_m_freem(m);
2197 return (ENOMEM);
2198 }
2199 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
2200 packet_length += sizeof(struct ip);
2201 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
2202 ip = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *);
2203 ip->ip_v = IPVERSION;
2204 ip->ip_hl = (sizeof(struct ip) >> 2);
2205 if (net) {
2206 tos_value = net->tos_flowlabel & 0x000000ff;
2207 } else {
2208 tos_value = inp->ip_inp.inp.inp_ip_tos;
2209 }
2210 if (nofragment_flag) {
2211#if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__)
2212 ip->ip_off = IP_DF;
2213#else
2214 ip->ip_off = htons(IP_DF);
2215#endif
2216 } else
2217 ip->ip_off = 0;
2218
2219
2220 /* FreeBSD has a function for ip_id's */
2221 ip->ip_id = ip_newid();
2222
2223 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
2224 ip->ip_len = SCTP_HEADER_LEN(o_pak);
2225 if (stcb) {
2226 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
2227 /* Enable ECN */
2228 ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
2229 } else {
2230 /* No ECN */
2231 ip->ip_tos = (u_char)(tos_value & 0xfc);
2232 }
2233 } else {
2234 /* no association at all */
2235 ip->ip_tos = (tos_value & 0xfc);
2236 }
2237 ip->ip_p = IPPROTO_SCTP;
2238 ip->ip_sum = 0;
2239 if (net == NULL) {
2240 ro = &iproute;
2241 memset(&iproute, 0, sizeof(iproute));
2242 memcpy(&ro->ro_dst, to, to->sa_len);
2243 } else {
2244 ro = (struct route *)&net->ro;
2245 }
2246 /* Now the address selection part */
2247 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
2248
2249 /* call the routine to select the src address */
2250 if (net) {
2251 if (net->src_addr_selected == 0) {
2252 /* Cache the source address */
3368 struct route iproute;
3369 uint8_t tos_value;
3370
3371 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip));
3372 if (o_pak == NULL) {
3373 /* failed to prepend data, give up */
3374 sctp_m_freem(m);
3375 return (ENOMEM);
3376 }
3377 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
3378 packet_length += sizeof(struct ip);
3379 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
3380 ip = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *);
3381 ip->ip_v = IPVERSION;
3382 ip->ip_hl = (sizeof(struct ip) >> 2);
3383 if (net) {
3384 tos_value = net->tos_flowlabel & 0x000000ff;
3385 } else {
3386 tos_value = inp->ip_inp.inp.inp_ip_tos;
3387 }
3388 if (nofragment_flag) {
3389#if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__APPLE__)
3390 ip->ip_off = IP_DF;
3391#else
3392 ip->ip_off = htons(IP_DF);
3393#endif
3394 } else
3395 ip->ip_off = 0;
3396
3397
3398 /* FreeBSD has a function for ip_id's */
3399 ip->ip_id = ip_newid();
3400
3401 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
3402 ip->ip_len = SCTP_HEADER_LEN(o_pak);
3403 if (stcb) {
3404 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
3405 /* Enable ECN */
3406 ip->ip_tos = ((u_char)(tos_value & 0xfc) | sctp_get_ect(stcb, chk));
3407 } else {
3408 /* No ECN */
3409 ip->ip_tos = (u_char)(tos_value & 0xfc);
3410 }
3411 } else {
3412 /* no association at all */
3413 ip->ip_tos = (tos_value & 0xfc);
3414 }
3415 ip->ip_p = IPPROTO_SCTP;
3416 ip->ip_sum = 0;
3417 if (net == NULL) {
3418 ro = &iproute;
3419 memset(&iproute, 0, sizeof(iproute));
3420 memcpy(&ro->ro_dst, to, to->sa_len);
3421 } else {
3422 ro = (struct route *)&net->ro;
3423 }
3424 /* Now the address selection part */
3425 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
3426
3427 /* call the routine to select the src address */
3428 if (net) {
3429 if (net->src_addr_selected == 0) {
3430 /* Cache the source address */
2253 ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr = sctp_ipv4_source_address_selection(inp,
2254 stcb,
2255 ro, net, out_of_asoc_ok);
2256 if (ro->ro_rt)
2257 net->src_addr_selected = 1;
3431 net->ro._s_addr = sctp_source_address_selection(inp, stcb,
3432 ro, net, out_of_asoc_ok, vrf_id);
3433 if (net->ro._s_addr == NULL) {
3434 /* No route to host */
3435 goto no_route;
3436 }
3437 net->src_addr_selected = 1;
2258 }
3438 }
2259 ip->ip_src = ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr;
3439 ip->ip_src = net->ro._s_addr->address.sin.sin_addr;
2260 } else {
3440 } else {
2261 ip->ip_src = sctp_ipv4_source_address_selection(inp,
2262 stcb, ro, net, out_of_asoc_ok);
3441 struct sctp_ifa *_lsrc;
3442
3443 _lsrc = sctp_source_address_selection(inp,
3444 stcb, ro, net, out_of_asoc_ok, vrf_id);
3445 if (_lsrc == NULL) {
3446 goto no_route;
3447 }
3448 ip->ip_src = _lsrc->address.sin.sin_addr;
3449 sctp_free_ifa(_lsrc);
2263 }
2264
2265 /*
2266 * If source address selection fails and we find no route
2267 * then the ip_output should fail as well with a
2268 * NO_ROUTE_TO_HOST type error. We probably should catch
2269 * that somewhere and abort the association right away
2270 * (assuming this is an INIT being sent).
2271 */
2272 if ((ro->ro_rt == NULL)) {
2273 /*
2274 * src addr selection failed to find a route (or
2275 * valid source addr), so we can't get there from
3450 }
3451
3452 /*
3453 * If source address selection fails and we find no route
3454 * then the ip_output should fail as well with a
3455 * NO_ROUTE_TO_HOST type error. We probably should catch
3456 * that somewhere and abort the association right away
3457 * (assuming this is an INIT being sent).
3458 */
3459 if ((ro->ro_rt == NULL)) {
3460 /*
3461 * src addr selection failed to find a route (or
3462 * valid source addr), so we can't get there from
2276 * here!
3463 * here (yet)!
2277 */
3464 */
3465 no_route:
2278#ifdef SCTP_DEBUG
2279 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
3466#ifdef SCTP_DEBUG
3467 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2280 printf("low_level_output: dropped v4 packet- no valid source addr\n");
2281 printf("Destination was %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr)));
3468 printf("low_level_output: dropped packet - no valid source addr\n");
3469 if (net) {
3470 printf("Destination was ");
3471 sctp_print_address(&net->ro._l_addr.sa);
3472 }
2282 }
2283#endif /* SCTP_DEBUG */
2284 if (net) {
3473 }
3474#endif /* SCTP_DEBUG */
3475 if (net) {
2285 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
2286 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
2287 stcb,
2288 SCTP_FAILED_THRESHOLD,
2289 (void *)net);
2290 net->dest_state &= ~SCTP_ADDR_REACHABLE;
2291 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
3476 if (net->dest_state & SCTP_ADDR_CONFIRMED) {
3477 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb) {
3478 printf("no route takes interface %p down\n", net);
3479 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
3480 stcb,
3481 SCTP_FAILED_THRESHOLD,
3482 (void *)net);
3483 net->dest_state &= ~SCTP_ADDR_REACHABLE;
3484 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
3485 }
3486 }
2292 if (stcb) {
2293 if (net == stcb->asoc.primary_destination) {
2294 /* need a new primary */
2295 struct sctp_nets *alt;
2296
2297 alt = sctp_find_alternate_net(stcb, net, 0);
2298 if (alt != net) {
2299 if (sctp_set_primary_addr(stcb,
2300 (struct sockaddr *)NULL,
2301 alt) == 0) {
2302 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
3487 if (stcb) {
3488 if (net == stcb->asoc.primary_destination) {
3489 /* need a new primary */
3490 struct sctp_nets *alt;
3491
3492 alt = sctp_find_alternate_net(stcb, net, 0);
3493 if (alt != net) {
3494 if (sctp_set_primary_addr(stcb,
3495 (struct sockaddr *)NULL,
3496 alt) == 0) {
3497 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
3498 if (net->ro._s_addr) {
3499 sctp_free_ifa(net->ro._s_addr);
3500 net->ro._s_addr = NULL;
3501 }
2303 net->src_addr_selected = 0;
2304 }
2305 }
2306 }
2307 }
2308 }
2309 sctp_m_freem(o_pak);
2310 return (EHOSTUNREACH);
2311 } else {
2312 have_mtu = ro->ro_rt->rt_ifp->if_mtu;
2313 }
2314 if (inp->sctp_socket) {
2315 o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)));
2316 } else {
2317 o_flgs = IP_RAWOUTPUT;
2318 }
2319#ifdef SCTP_DEBUG
2320 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2321 printf("Calling ipv4 output routine from low level src addr:%x\n",
2322 (uint32_t) (ntohl(ip->ip_src.s_addr)));
2323 printf("Destination is %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr)));
2324 printf("RTP route is %p through\n", ro->ro_rt);
2325 }
2326#endif
2327
2328 if ((have_mtu) && (net) && (have_mtu > net->mtu)) {
2329 ro->ro_rt->rt_ifp->if_mtu = net->mtu;
2330 }
2331 if (ro != &iproute) {
2332 memcpy(&iproute, ro, sizeof(*ro));
2333 }
2334 ret = ip_output(o_pak, inp->ip_inp.inp.inp_options,
2335 ro, o_flgs, inp->ip_inp.inp.inp_moptions
2336 ,(struct inpcb *)NULL
2337 );
2338 if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) {
2339 ro->ro_rt->rt_ifp->if_mtu = have_mtu;
2340 }
2341 SCTP_STAT_INCR(sctps_sendpackets);
2342 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
2343 if (ret)
2344 SCTP_STAT_INCR(sctps_senderrors);
2345#ifdef SCTP_DEBUG
2346 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2347 printf("Ip output returns %d\n", ret);
2348 }
2349#endif
2350 if (net == NULL) {
2351 /* free tempy routes */
3502 net->src_addr_selected = 0;
3503 }
3504 }
3505 }
3506 }
3507 }
3508 sctp_m_freem(o_pak);
3509 return (EHOSTUNREACH);
3510 } else {
3511 have_mtu = ro->ro_rt->rt_ifp->if_mtu;
3512 }
3513 if (inp->sctp_socket) {
3514 o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)));
3515 } else {
3516 o_flgs = IP_RAWOUTPUT;
3517 }
3518#ifdef SCTP_DEBUG
3519 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
3520 printf("Calling ipv4 output routine from low level src addr:%x\n",
3521 (uint32_t) (ntohl(ip->ip_src.s_addr)));
3522 printf("Destination is %x\n", (uint32_t) (ntohl(ip->ip_dst.s_addr)));
3523 printf("RTP route is %p through\n", ro->ro_rt);
3524 }
3525#endif
3526
3527 if ((have_mtu) && (net) && (have_mtu > net->mtu)) {
3528 ro->ro_rt->rt_ifp->if_mtu = net->mtu;
3529 }
3530 if (ro != &iproute) {
3531 memcpy(&iproute, ro, sizeof(*ro));
3532 }
3533 ret = ip_output(o_pak, inp->ip_inp.inp.inp_options,
3534 ro, o_flgs, inp->ip_inp.inp.inp_moptions
3535 ,(struct inpcb *)NULL
3536 );
3537 if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) {
3538 ro->ro_rt->rt_ifp->if_mtu = have_mtu;
3539 }
3540 SCTP_STAT_INCR(sctps_sendpackets);
3541 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
3542 if (ret)
3543 SCTP_STAT_INCR(sctps_senderrors);
3544#ifdef SCTP_DEBUG
3545 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
3546 printf("Ip output returns %d\n", ret);
3547 }
3548#endif
3549 if (net == NULL) {
3550 /* free tempy routes */
2352 if (ro->ro_rt)
3551 if (ro->ro_rt) {
2353 RTFREE(ro->ro_rt);
3552 RTFREE(ro->ro_rt);
3553 ro->ro_rt = NULL;
3554 }
2354 } else {
2355 /* PMTU check versus smallest asoc MTU goes here */
2356 if (ro->ro_rt != NULL) {
2357 if (ro->ro_rt->rt_rmx.rmx_mtu &&
2358 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
2359 sctp_mtu_size_reset(inp, &stcb->asoc,
2360 ro->ro_rt->rt_rmx.rmx_mtu);
2361 }
2362 } else {
2363 /* route was freed */
3555 } else {
3556 /* PMTU check versus smallest asoc MTU goes here */
3557 if (ro->ro_rt != NULL) {
3558 if (ro->ro_rt->rt_rmx.rmx_mtu &&
3559 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
3560 sctp_mtu_size_reset(inp, &stcb->asoc,
3561 ro->ro_rt->rt_rmx.rmx_mtu);
3562 }
3563 } else {
3564 /* route was freed */
3565 if (net->ro._s_addr &&
3566 net->src_addr_selected) {
3567 sctp_free_ifa(net->ro._s_addr);
3568 net->ro._s_addr = NULL;
3569 }
2364 net->src_addr_selected = 0;
2365 }
2366 }
2367 return (ret);
2368 }
2369#ifdef INET6
2370 else if (to->sa_family == AF_INET6) {
2371 uint32_t flowlabel;
2372 struct ip6_hdr *ip6h;
2373
2374 struct route_in6 ip6route;
2375 struct ifnet *ifp;
2376 u_char flowTop;
2377 uint16_t flowBottom;
2378 u_char tosBottom, tosTop;
2379 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
2380 struct sockaddr_in6 lsa6_storage;
2381 int prev_scope = 0;
2382 int error;
2383 u_short prev_port = 0;
2384
2385 if (net != NULL) {
2386 flowlabel = net->tos_flowlabel;
2387 } else {
2388 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2389 }
2390 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr));
2391 if (o_pak == NULL) {
2392 /* failed to prepend data, give up */
2393 sctp_m_freem(m);
2394 return (ENOMEM);
2395 }
2396 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
2397 packet_length += sizeof(struct ip6_hdr);
2398 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
2399 ip6h = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *);
2400 /*
2401 * We assume here that inp_flow is in host byte order within
2402 * the TCB!
2403 */
2404 flowBottom = flowlabel & 0x0000ffff;
2405 flowTop = ((flowlabel & 0x000f0000) >> 16);
2406 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
2407 /* protect *sin6 from overwrite */
2408 sin6 = (struct sockaddr_in6 *)to;
2409 tmp = *sin6;
2410 sin6 = &tmp;
2411
2412 /* KAME hack: embed scopeid */
2413 if (sa6_embedscope(sin6, ip6_use_defzone) != 0)
2414 return (EINVAL);
2415 if (net == NULL) {
2416 memset(&ip6route, 0, sizeof(ip6route));
2417 ro = (struct route *)&ip6route;
2418 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
2419 } else {
2420 ro = (struct route *)&net->ro;
2421 }
2422 if (stcb != NULL) {
2423 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
2424 /* Enable ECN */
2425 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
2426 } else {
2427 /* No ECN */
2428 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
2429 }
2430 } else {
2431 /* we could get no asoc if it is a O-O-T-B packet */
2432 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
2433 }
2434 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
2435 ip6h->ip6_nxt = IPPROTO_SCTP;
2436 ip6h->ip6_plen = (SCTP_HEADER_LEN(o_pak) - sizeof(struct ip6_hdr));
2437 ip6h->ip6_dst = sin6->sin6_addr;
2438
2439 /*
2440 * Add SRC address selection here: we can only reuse to a
2441 * limited degree the kame src-addr-sel, since we can try
2442 * their selection but it may not be bound.
2443 */
2444 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
2445 lsa6_tmp.sin6_family = AF_INET6;
2446 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
2447 lsa6 = &lsa6_tmp;
2448 if (net) {
2449 if (net->src_addr_selected == 0) {
2450 /* Cache the source address */
3570 net->src_addr_selected = 0;
3571 }
3572 }
3573 return (ret);
3574 }
3575#ifdef INET6
3576 else if (to->sa_family == AF_INET6) {
3577 uint32_t flowlabel;
3578 struct ip6_hdr *ip6h;
3579
3580 struct route_in6 ip6route;
3581 struct ifnet *ifp;
3582 u_char flowTop;
3583 uint16_t flowBottom;
3584 u_char tosBottom, tosTop;
3585 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
3586 struct sockaddr_in6 lsa6_storage;
3587 int prev_scope = 0;
3588 int error;
3589 u_short prev_port = 0;
3590
3591 if (net != NULL) {
3592 flowlabel = net->tos_flowlabel;
3593 } else {
3594 flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
3595 }
3596 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr));
3597 if (o_pak == NULL) {
3598 /* failed to prepend data, give up */
3599 sctp_m_freem(m);
3600 return (ENOMEM);
3601 }
3602 SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
3603 packet_length += sizeof(struct ip6_hdr);
3604 SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
3605 ip6h = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *);
3606 /*
3607 * We assume here that inp_flow is in host byte order within
3608 * the TCB!
3609 */
3610 flowBottom = flowlabel & 0x0000ffff;
3611 flowTop = ((flowlabel & 0x000f0000) >> 16);
3612 tosTop = (((flowlabel & 0xf0) >> 4) | IPV6_VERSION);
3613 /* protect *sin6 from overwrite */
3614 sin6 = (struct sockaddr_in6 *)to;
3615 tmp = *sin6;
3616 sin6 = &tmp;
3617
3618 /* KAME hack: embed scopeid */
3619 if (sa6_embedscope(sin6, ip6_use_defzone) != 0)
3620 return (EINVAL);
3621 if (net == NULL) {
3622 memset(&ip6route, 0, sizeof(ip6route));
3623 ro = (struct route *)&ip6route;
3624 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
3625 } else {
3626 ro = (struct route *)&net->ro;
3627 }
3628 if (stcb != NULL) {
3629 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
3630 /* Enable ECN */
3631 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
3632 } else {
3633 /* No ECN */
3634 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3635 }
3636 } else {
3637 /* we could get no asoc if it is a O-O-T-B packet */
3638 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
3639 }
3640 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom | flowTop) << 16) | flowBottom));
3641 ip6h->ip6_nxt = IPPROTO_SCTP;
3642 ip6h->ip6_plen = (SCTP_HEADER_LEN(o_pak) - sizeof(struct ip6_hdr));
3643 ip6h->ip6_dst = sin6->sin6_addr;
3644
3645 /*
3646 * Add SRC address selection here: we can only reuse to a
3647 * limited degree the kame src-addr-sel, since we can try
3648 * their selection but it may not be bound.
3649 */
3650 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
3651 lsa6_tmp.sin6_family = AF_INET6;
3652 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
3653 lsa6 = &lsa6_tmp;
3654 if (net) {
3655 if (net->src_addr_selected == 0) {
3656 /* Cache the source address */
2451 ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr = sctp_ipv6_source_address_selection(inp,
2452 stcb, ro, net, out_of_asoc_ok);
2453
2454 if (ro->ro_rt)
2455 net->src_addr_selected = 1;
3657 net->ro._s_addr = sctp_source_address_selection(inp,
3658 stcb,
3659 ro,
3660 net,
3661 out_of_asoc_ok,
3662 vrf_id);
3663 if (net->ro._s_addr == NULL) {
3664#ifdef SCTP_DEBUG
3665 printf("V6:No route to host\n");
3666#endif
3667 goto no_route;
3668 }
3669 net->src_addr_selected = 1;
2456 }
3670 }
2457 lsa6->sin6_addr = ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr;
3671 lsa6->sin6_addr = net->ro._s_addr->address.sin6.sin6_addr;
2458 } else {
3672 } else {
2459 lsa6->sin6_addr = sctp_ipv6_source_address_selection(
2460 inp, stcb, ro, net, out_of_asoc_ok);
3673 struct sctp_ifa *_lsrc;
3674
3675 _lsrc = sctp_source_address_selection(inp, stcb, ro, net, out_of_asoc_ok, vrf_id);
3676 if (_lsrc == NULL) {
3677 goto no_route;
3678 }
3679 lsa6->sin6_addr = _lsrc->address.sin6.sin6_addr;
3680 sctp_free_ifa(_lsrc);
2461 }
2462 lsa6->sin6_port = inp->sctp_lport;
2463
2464 if ((ro->ro_rt == NULL)) {
2465 /*
2466 * src addr selection failed to find a route (or
2467 * valid source addr), so we can't get there from
2468 * here!
2469 */
3681 }
3682 lsa6->sin6_port = inp->sctp_lport;
3683
3684 if ((ro->ro_rt == NULL)) {
3685 /*
3686 * src addr selection failed to find a route (or
3687 * valid source addr), so we can't get there from
3688 * here!
3689 */
2470#ifdef SCTP_DEBUG
2471 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2472 printf("low_level_output: dropped v6 pkt- no valid source addr\n");
2473 }
2474#endif
2475 sctp_m_freem(o_pak);
2476 if (net) {
2477 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
2478 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
2479 stcb,
2480 SCTP_FAILED_THRESHOLD,
2481 (void *)net);
2482 net->dest_state &= ~SCTP_ADDR_REACHABLE;
2483 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
2484 if (stcb) {
2485 if (net == stcb->asoc.primary_destination) {
2486 /* need a new primary */
2487 struct sctp_nets *alt;
2488
2489 alt = sctp_find_alternate_net(stcb, net, 0);
2490 if (alt != net) {
2491 if (sctp_set_primary_addr(stcb,
2492 (struct sockaddr *)NULL,
2493 alt) == 0) {
2494 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
2495 net->src_addr_selected = 0;
2496 }
2497 }
2498 }
2499 }
2500 }
2501 return (EHOSTUNREACH);
3690 goto no_route;
2502 }
2503 /*
2504 * XXX: sa6 may not have a valid sin6_scope_id in the
2505 * non-SCOPEDROUTING case.
2506 */
2507 bzero(&lsa6_storage, sizeof(lsa6_storage));
2508 lsa6_storage.sin6_family = AF_INET6;
2509 lsa6_storage.sin6_len = sizeof(lsa6_storage);
2510 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
2511 sctp_m_freem(o_pak);
2512 return (error);
2513 }
2514 /* XXX */
2515 lsa6_storage.sin6_addr = lsa6->sin6_addr;
2516 lsa6_storage.sin6_port = inp->sctp_lport;
2517 lsa6 = &lsa6_storage;
2518 ip6h->ip6_src = lsa6->sin6_addr;
2519
2520 /*
2521 * We set the hop limit now since there is a good chance
2522 * that our ro pointer is now filled
2523 */
2524 ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp,
2525 (ro ?
2526 (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) :
2527 (NULL)));
2528 o_flgs = 0;
2529 ifp = ro->ro_rt->rt_ifp;
2530#ifdef SCTP_DEBUG
2531 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2532 /* Copy to be sure something bad is not happening */
2533 sin6->sin6_addr = ip6h->ip6_dst;
2534 lsa6->sin6_addr = ip6h->ip6_src;
2535
2536 printf("Calling ipv6 output routine from low level\n");
2537 printf("src: ");
2538 sctp_print_address((struct sockaddr *)lsa6);
2539 printf("dst: ");
2540 sctp_print_address((struct sockaddr *)sin6);
2541 }
2542#endif /* SCTP_DEBUG */
2543 if (net) {
2544 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2545 /* preserve the port and scope for link local send */
2546 prev_scope = sin6->sin6_scope_id;
2547 prev_port = sin6->sin6_port;
2548 }
2549 ret = ip6_output(o_pak, ((struct in6pcb *)inp)->in6p_outputopts,
2550 (struct route_in6 *)ro,
2551 o_flgs,
2552 ((struct in6pcb *)inp)->in6p_moptions,
2553 &ifp
2554 ,NULL
2555 );
2556 if (net) {
2557 /* for link local this must be done */
2558 sin6->sin6_scope_id = prev_scope;
2559 sin6->sin6_port = prev_port;
2560 }
2561#ifdef SCTP_DEBUG
2562 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2563 printf("return from send is %d\n", ret);
2564 }
2565#endif /* SCTP_DEBUG_OUTPUT */
2566 SCTP_STAT_INCR(sctps_sendpackets);
2567 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
3691 }
3692 /*
3693 * XXX: sa6 may not have a valid sin6_scope_id in the
3694 * non-SCOPEDROUTING case.
3695 */
3696 bzero(&lsa6_storage, sizeof(lsa6_storage));
3697 lsa6_storage.sin6_family = AF_INET6;
3698 lsa6_storage.sin6_len = sizeof(lsa6_storage);
3699 if ((error = sa6_recoverscope(&lsa6_storage)) != 0) {
3700 sctp_m_freem(o_pak);
3701 return (error);
3702 }
3703 /* XXX */
3704 lsa6_storage.sin6_addr = lsa6->sin6_addr;
3705 lsa6_storage.sin6_port = inp->sctp_lport;
3706 lsa6 = &lsa6_storage;
3707 ip6h->ip6_src = lsa6->sin6_addr;
3708
3709 /*
3710 * We set the hop limit now since there is a good chance
3711 * that our ro pointer is now filled
3712 */
3713 ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp,
3714 (ro ?
3715 (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) :
3716 (NULL)));
3717 o_flgs = 0;
3718 ifp = ro->ro_rt->rt_ifp;
3719#ifdef SCTP_DEBUG
3720 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
3721 /* Copy to be sure something bad is not happening */
3722 sin6->sin6_addr = ip6h->ip6_dst;
3723 lsa6->sin6_addr = ip6h->ip6_src;
3724
3725 printf("Calling ipv6 output routine from low level\n");
3726 printf("src: ");
3727 sctp_print_address((struct sockaddr *)lsa6);
3728 printf("dst: ");
3729 sctp_print_address((struct sockaddr *)sin6);
3730 }
3731#endif /* SCTP_DEBUG */
3732 if (net) {
3733 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
3734 /* preserve the port and scope for link local send */
3735 prev_scope = sin6->sin6_scope_id;
3736 prev_port = sin6->sin6_port;
3737 }
3738 ret = ip6_output(o_pak, ((struct in6pcb *)inp)->in6p_outputopts,
3739 (struct route_in6 *)ro,
3740 o_flgs,
3741 ((struct in6pcb *)inp)->in6p_moptions,
3742 &ifp
3743 ,NULL
3744 );
3745 if (net) {
3746 /* for link local this must be done */
3747 sin6->sin6_scope_id = prev_scope;
3748 sin6->sin6_port = prev_port;
3749 }
3750#ifdef SCTP_DEBUG
3751 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
3752 printf("return from send is %d\n", ret);
3753 }
3754#endif /* SCTP_DEBUG_OUTPUT */
3755 SCTP_STAT_INCR(sctps_sendpackets);
3756 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
2568 if (ret)
3757 if (ret) {
2569 SCTP_STAT_INCR(sctps_senderrors);
3758 SCTP_STAT_INCR(sctps_senderrors);
3759 }
2570 if (net == NULL) {
2571 /* Now if we had a temp route free it */
2572 if (ro->ro_rt) {
2573 RTFREE(ro->ro_rt);
2574 }
2575 } else {
2576 /* PMTU check versus smallest asoc MTU goes here */
2577 if (ro->ro_rt == NULL) {
2578 /* Route was freed */
3760 if (net == NULL) {
3761 /* Now if we had a temp route free it */
3762 if (ro->ro_rt) {
3763 RTFREE(ro->ro_rt);
3764 }
3765 } else {
3766 /* PMTU check versus smallest asoc MTU goes here */
3767 if (ro->ro_rt == NULL) {
3768 /* Route was freed */
3769
3770 if (net->ro._s_addr &&
3771 net->src_addr_selected) {
3772 sctp_free_ifa(net->ro._s_addr);
3773 net->ro._s_addr = NULL;
3774 }
2579 net->src_addr_selected = 0;
2580 }
2581 if (ro->ro_rt != NULL) {
2582 if (ro->ro_rt->rt_rmx.rmx_mtu &&
2583 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
2584 sctp_mtu_size_reset(inp,
2585 &stcb->asoc,
2586 ro->ro_rt->rt_rmx.rmx_mtu);
2587 }
2588 } else if (ifp) {
2589 if (ND_IFINFO(ifp)->linkmtu &&
2590 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
2591 sctp_mtu_size_reset(inp,
2592 &stcb->asoc,
2593 ND_IFINFO(ifp)->linkmtu);
2594 }
2595 }
2596 }
2597 return (ret);
2598 }
2599#endif
2600 else {
2601#ifdef SCTP_DEBUG
2602 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2603 printf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family);
2604 }
2605#endif
2606 sctp_m_freem(m);
2607 return (EFAULT);
2608 }
2609}
2610
2611
/*
 * Build and transmit an INIT chunk for the given association.
 *
 * The INIT is sent to the primary destination (or the first net in the
 * list if no primary is set).  The chunk is assembled in a single mbuf:
 * SCTP common header, INIT chunk header, then the optional parameters
 * (supported address types, adaptation layer, cookie-preservative, ECN,
 * PR-SCTP, supported extensions, ECN nonce, AUTH RANDOM/HMAC/CHUNKS),
 * followed by our local addresses.  On any failure the function simply
 * returns; the INIT timer drives the retransmission.
 */
void
sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
{
	struct mbuf *m, *m_at, *m_last;
	struct sctp_nets *net;
	struct sctp_init_msg *initm;
	struct sctp_supported_addr_param *sup_addr;
	struct sctp_ecn_supported_param *ecn;
	struct sctp_prsctp_supported_param *prsctp;
	struct sctp_ecn_nonce_supported_param *ecn_nonce;
	struct sctp_supported_chunk_types_param *pr_supported;
	int cnt_inits_to = 0;
	int padval, ret;
	int num_ext;
	int p_len;

	/* INIT's always go to the primary (and usually ONLY address) */
	m_last = NULL;
	net = stcb->asoc.primary_destination;
	if (net == NULL) {
		net = TAILQ_FIRST(&stcb->asoc.nets);
		if (net == NULL) {
			/* TSNH */
			return;
		}
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		sctp_set_primary_addr(stcb, NULL, net);
	} else {
		/* we confirm any address we send an INIT to */
		net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
	}
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
		printf("Sending INIT\n");
	}
#endif
	if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
		/*
		 * special hook, if we are sending to link local it will not
		 * show up in our private address count.
		 */
		struct sockaddr_in6 *sin6l;

		sin6l = &net->ro._l_addr.sin6;
		if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
			cnt_inits_to = 1;
	}
	if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
		/* This case should not happen */
		return;
	}
	/* start the INIT timer */
	if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) {
		/* we are hosed since I can't start the INIT timer? */
		return;
	}
	m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
	if (m == NULL) {
		/* No memory, INIT timer will re-attempt. */
		return;
	}
	SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
	/* Now lets put the SCTP header in place */
	initm = mtod(m, struct sctp_init_msg *);
	initm->sh.src_port = inp->sctp_lport;
	initm->sh.dest_port = stcb->rport;
	initm->sh.v_tag = 0;
	initm->sh.checksum = 0;	/* calculate later */
	/* now the chunk header */
	initm->msg.ch.chunk_type = SCTP_INITIATION;
	initm->msg.ch.chunk_flags = 0;
	/* fill in later from mbuf we build */
	initm->msg.ch.chunk_length = 0;
	/* place in my tag */
	initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
	/* set up some of the credits. */
	initm->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat,
	    SCTP_MINIMAL_RWND));

	initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
	initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
	initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
	/* now the address restriction */
	sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
	    sizeof(*initm));
	sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
	/* we support 2 types IPv6/IPv4 */
	sup_addr->ph.param_length = htons(sizeof(*sup_addr) +
	    sizeof(uint16_t));
	sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
	sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
	SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);

	/* optional ULP adaptation-layer indication parameter */
	if (inp->sctp_ep.adaptation_layer_indicator) {
		struct sctp_adaptation_layer_indication *ali;

		ali = (struct sctp_adaptation_layer_indication *)(
		    (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
		ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
		ali->ph.param_length = htons(sizeof(*ali));
		ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
		SCTP_BUF_LEN(m) += sizeof(*ali);
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
		    sizeof(*ali));
	} else {
		/* no adaptation param; next param goes right after addrtype */
		ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr +
		    sizeof(*sup_addr) + sizeof(uint16_t));
	}

	/* now any cookie time extensions */
	if (stcb->asoc.cookie_preserve_req) {
		struct sctp_cookie_perserve_param *cookie_preserve;

		/* cookie-preservative takes the slot `ecn` points at */
		cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
		cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
		cookie_preserve->ph.param_length = htons(
		    sizeof(*cookie_preserve));
		cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
		SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
		ecn = (struct sctp_ecn_supported_param *)(
		    (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
		/* one-shot request: clear it once it has been sent */
		stcb->asoc.cookie_preserve_req = 0;
	}
	/* ECN parameter */
	if (sctp_ecn_enable == 1) {
		ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
		ecn->ph.param_length = htons(sizeof(*ecn));
		SCTP_BUF_LEN(m) += sizeof(*ecn);
		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
		    sizeof(*ecn));
	} else {
		prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
	}
	/* And now tell the peer we do pr-sctp */
	prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
	prsctp->ph.param_length = htons(sizeof(*prsctp));
	SCTP_BUF_LEN(m) += sizeof(*prsctp);

	/* And now tell the peer we do all the extensions */
	pr_supported = (struct sctp_supported_chunk_types_param *)
	    ((caddr_t)prsctp + sizeof(*prsctp));
	pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
	num_ext = 0;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
	pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
	pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
	pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
	pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
	if (!sctp_auth_disable)
		pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
	p_len = sizeof(*pr_supported) + num_ext;
	pr_supported->ph.param_length = htons(p_len);
	/* parameters are 32-bit aligned on the wire; zero the pad bytes */
	bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
	SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);

	/* ECN nonce: And now tell the peer we support ECN nonce */
	if (sctp_ecn_nonce) {
		ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
		    ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
		ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
		ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
		SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
	}
	/* add authentication parameters (RANDOM, HMAC-ALGO, CHUNKS) */
	if (!sctp_auth_disable) {
		struct sctp_auth_random *random;
		struct sctp_auth_hmac_algo *hmacs;
		struct sctp_auth_chunk_list *chunks;

		/* attach RANDOM parameter, if available */
		if (stcb->asoc.authinfo.random != NULL) {
			random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
			p_len = sizeof(*random) + stcb->asoc.authinfo.random_len;
#ifdef SCTP_AUTH_DRAFT_04
			random->ph.param_type = htons(SCTP_RANDOM);
			random->ph.param_length = htons(p_len);
			bcopy(stcb->asoc.authinfo.random->key,
			    random->random_data,
			    stcb->asoc.authinfo.random_len);
#else
			/* random key already contains the header */
			bcopy(stcb->asoc.authinfo.random->key, random, p_len);
#endif
			/* zero out any padding required */
			bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
		/* add HMAC_ALGO parameter */
		hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
		    (uint8_t *) hmacs->hmac_ids);
		if (p_len > 0) {
			p_len += sizeof(*hmacs);
			hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
			hmacs->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
		/* add CHUNKS parameter */
		chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
		p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
		    chunks->chunk_types);
		if (p_len > 0) {
			p_len += sizeof(*chunks);
			chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
			chunks->ph.param_length = htons(p_len);
			/* zero out any padding required */
			bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
			SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
		}
	}
	m_at = m;
	/* now the addresses */
	{
		struct sctp_scoping scp;

		/*
		 * To optimize this we could put the scoping stuff into a
		 * structure and remove the individual uint8's from the
		 * assoc structure. Then we could just pass in the address
		 * within the stcb.. but for now this is a quick hack to get
		 * the address stuff teased apart.
		 */
		scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
		scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
		scp.loopback_scope = stcb->asoc.loopback_scope;
		scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
		scp.local_scope = stcb->asoc.local_scope;
		scp.site_scope = stcb->asoc.site_scope;

		m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
	}


	/* calulate the size and update pkt header and chunk header */
	p_len = 0;
	for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
		if (SCTP_BUF_NEXT(m_at) == NULL)
			m_last = m_at;
		p_len += SCTP_BUF_LEN(m_at);
	}
	/* chunk length excludes the common SCTP header */
	initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr)));
	/*
	 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return
	 * here since the timer will drive a retranmission.
	 */

	/* I don't expect this to execute but we will be safe here */
	padval = p_len % 4;
	if ((padval) && (m_last)) {
		/*
		 * The compiler worries that m_last may not be set even
		 * though I think it is impossible :-> however we add m_last
		 * here just in case.
		 */
		int ret;

		ret = sctp_add_pad_tombuf(m_last, (4 - padval));
		if (ret) {
			/* Houston we have a problem, no space */
			sctp_m_freem(m);
			return;
		}
		p_len += padval;
	}
	ret = sctp_lowlevel_chunk_output(inp, stcb, net,
	    (struct sockaddr *)&net->ro._l_addr,
	    m, 0, NULL, 0, 0, NULL, 0);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
	/* restart the INIT timer and remember when we last sent to this net */
	sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
	SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
}
2886
2887struct mbuf *
2888sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
2889 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp)
2890{
2891 /*
2892 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
2893 * being equal to the beginning of the params i.e. (iphlen +
2894 * sizeof(struct sctp_init_msg) parse through the parameters to the
2895 * end of the mbuf verifying that all parameters are known.
2896 *
2897 * For unknown parameters build and return a mbuf with
2898 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
2899 * processing this chunk stop, and set *abort_processing to 1.
2900 *
2901 * By having param_offset be pre-set to where parameters begin it is
2902 * hoped that this routine may be reused in the future by new
2903 * features.
2904 */
2905 struct sctp_paramhdr *phdr, params;
2906
2907 struct mbuf *mat, *op_err;
2908 char tempbuf[SCTP_CHUNK_BUFFER_SIZE];
2909 int at, limit, pad_needed;
2910 uint16_t ptype, plen;
2911 int err_at;
2912
2913 *abort_processing = 0;
2914 mat = in_initpkt;
2915 err_at = 0;
2916 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
2917 at = param_offset;
2918 op_err = NULL;
2919
2920 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
2921 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
2922 ptype = ntohs(phdr->param_type);
2923 plen = ntohs(phdr->param_length);
2924 limit -= SCTP_SIZE32(plen);
2925 if (plen < sizeof(struct sctp_paramhdr)) {
2926#ifdef SCTP_DEBUG
2927 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2928 printf("sctp_output.c:Impossible length in parameter < %d\n", plen);
2929 }
2930#endif
2931 *abort_processing = 1;
2932 break;
2933 }
2934 /*
2935 * All parameters for all chunks that we know/understand are
2936 * listed here. We process them other places and make
2937 * appropriate stop actions per the upper bits. However this
2938 * is the generic routine processor's can call to get back
2939 * an operr.. to either incorporate (init-ack) or send.
2940 */
2941 if ((ptype == SCTP_HEARTBEAT_INFO) ||
2942 (ptype == SCTP_IPV4_ADDRESS) ||
2943 (ptype == SCTP_IPV6_ADDRESS) ||
2944 (ptype == SCTP_STATE_COOKIE) ||
2945 (ptype == SCTP_UNRECOG_PARAM) ||
2946 (ptype == SCTP_COOKIE_PRESERVE) ||
2947 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
2948 (ptype == SCTP_PRSCTP_SUPPORTED) ||
2949 (ptype == SCTP_ADD_IP_ADDRESS) ||
2950 (ptype == SCTP_DEL_IP_ADDRESS) ||
2951 (ptype == SCTP_ECN_CAPABLE) ||
2952 (ptype == SCTP_ULP_ADAPTATION) ||
2953 (ptype == SCTP_ERROR_CAUSE_IND) ||
2954 (ptype == SCTP_RANDOM) ||
2955 (ptype == SCTP_CHUNK_LIST) ||
2956 (ptype == SCTP_CHUNK_LIST) ||
2957 (ptype == SCTP_SET_PRIM_ADDR) ||
2958 (ptype == SCTP_SUCCESS_REPORT) ||
2959 (ptype == SCTP_ULP_ADAPTATION) ||
2960 (ptype == SCTP_SUPPORTED_CHUNK_EXT) ||
2961 (ptype == SCTP_ECN_NONCE_SUPPORTED)
2962 ) {
2963 /* no skip it */
2964 at += SCTP_SIZE32(plen);
2965 } else if (ptype == SCTP_HOSTNAME_ADDRESS) {
2966 /* We can NOT handle HOST NAME addresses!! */
2967 int l_len;
2968
2969#ifdef SCTP_DEBUG
2970 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2971 printf("Can't handle hostname addresses.. abort processing\n");
2972 }
2973#endif
2974 *abort_processing = 1;
2975 if (op_err == NULL) {
2976 /* Ok need to try to get a mbuf */
2977 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
2978 l_len += plen;
2979 l_len += sizeof(struct sctp_paramhdr);
2980 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
2981 if (op_err) {
2982 SCTP_BUF_LEN(op_err) = 0;
2983 /*
2984 * pre-reserve space for ip and sctp
2985 * header and chunk hdr
2986 */
2987 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
2988 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
2989 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
2990 }
2991 }
2992 if (op_err) {
2993 /* If we have space */
2994 struct sctp_paramhdr s;
2995
2996 if (err_at % 4) {
2997 uint32_t cpthis = 0;
2998
2999 pad_needed = 4 - (err_at % 4);
3000 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
3001 err_at += pad_needed;
3002 }
3003 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
3004 s.param_length = htons(sizeof(s) + plen);
3005 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
3006 err_at += sizeof(s);
3007 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
3008 if (phdr == NULL) {
3009 sctp_m_freem(op_err);
3010 /*
3011 * we are out of memory but we still
3012 * need to have a look at what to do
3013 * (the system is in trouble
3014 * though).
3015 */
3016 return (NULL);
3017 }
3018 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
3019 err_at += plen;
3020 }
3021 return (op_err);
3022 } else {
3023 /*
3024 * we do not recognize the parameter figure out what
3025 * we do.
3026 */
3027 if ((ptype & 0x4000) == 0x4000) {
3028 /* Report bit is set?? */
3029 if (op_err == NULL) {
3030 int l_len;
3031
3032 /* Ok need to try to get an mbuf */
3033 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
3034 l_len += plen;
3035 l_len += sizeof(struct sctp_paramhdr);
3036 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
3037 if (op_err) {
3038 SCTP_BUF_LEN(op_err) = 0;
3039 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
3040 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
3041 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
3042 }
3043 }
3044 if (op_err) {
3045 /* If we have space */
3046 struct sctp_paramhdr s;
3047
3048 if (err_at % 4) {
3049 uint32_t cpthis = 0;
3050
3051 pad_needed = 4 - (err_at % 4);
3052 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
3053 err_at += pad_needed;
3054 }
3055 s.param_type = htons(SCTP_UNRECOG_PARAM);
3056 s.param_length = htons(sizeof(s) + plen);
3057 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
3058 err_at += sizeof(s);
3059 if (plen > sizeof(tempbuf)) {
3060 plen = sizeof(tempbuf);
3061 }
3062 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
3063 if (phdr == NULL) {
3064 sctp_m_freem(op_err);
3065 /*
3066 * we are out of memory but
3067 * we still need to have a
3068 * look at what to do (the
3069 * system is in trouble
3070 * though).
3071 */
3072 goto more_processing;
3073 }
3074 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
3075 err_at += plen;
3076 }
3077 }
3078 more_processing:
3079 if ((ptype & 0x8000) == 0x0000) {
3080 return (op_err);
3081 } else {
3082 /* skip this chunk and continue processing */
3083 at += SCTP_SIZE32(plen);
3084 }
3085
3086 }
3087 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
3088 }
3089 return (op_err);
3090}
3091
3092static int
3093sctp_are_there_new_addresses(struct sctp_association *asoc,
3094 struct mbuf *in_initpkt, int iphlen, int offset)
3095{
3096 /*
3097 * Given a INIT packet, look through the packet to verify that there
3098 * are NO new addresses. As we go through the parameters add reports
3099 * of any un-understood parameters that require an error. Also we
3100 * must return (1) to drop the packet if we see a un-understood
3101 * parameter that tells us to drop the chunk.
3102 */
3103 struct sockaddr_in sin4, *sa4;
3104 struct sockaddr_in6 sin6, *sa6;
3105 struct sockaddr *sa_touse;
3106 struct sockaddr *sa;
3107 struct sctp_paramhdr *phdr, params;
3108 struct ip *iph;
3109 struct mbuf *mat;
3110 uint16_t ptype, plen;
3111 int err_at;
3112 uint8_t fnd;
3113 struct sctp_nets *net;
3114
3115 memset(&sin4, 0, sizeof(sin4));
3116 memset(&sin6, 0, sizeof(sin6));
3117 sin4.sin_family = AF_INET;
3118 sin4.sin_len = sizeof(sin4);
3119 sin6.sin6_family = AF_INET6;
3120 sin6.sin6_len = sizeof(sin6);
3121
3122 sa_touse = NULL;
3123 /* First what about the src address of the pkt ? */
3124 iph = mtod(in_initpkt, struct ip *);
3125 if (iph->ip_v == IPVERSION) {
3126 /* source addr is IPv4 */
3127 sin4.sin_addr = iph->ip_src;
3128 sa_touse = (struct sockaddr *)&sin4;
3129 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3130 /* source addr is IPv6 */
3131 struct ip6_hdr *ip6h;
3132
3133 ip6h = mtod(in_initpkt, struct ip6_hdr *);
3134 sin6.sin6_addr = ip6h->ip6_src;
3135 sa_touse = (struct sockaddr *)&sin6;
3136 } else {
3137 return (1);
3138 }
3139
3140 fnd = 0;
3141 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3142 sa = (struct sockaddr *)&net->ro._l_addr;
3143 if (sa->sa_family == sa_touse->sa_family) {
3144 if (sa->sa_family == AF_INET) {
3145 sa4 = (struct sockaddr_in *)sa;
3146 if (sa4->sin_addr.s_addr ==
3147 sin4.sin_addr.s_addr) {
3148 fnd = 1;
3149 break;
3150 }
3151 } else if (sa->sa_family == AF_INET6) {
3152 sa6 = (struct sockaddr_in6 *)sa;
3153 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr,
3154 &sin6.sin6_addr)) {
3155 fnd = 1;
3156 break;
3157 }
3158 }
3159 }
3160 }
3161 if (fnd == 0) {
3162 /* New address added! no need to look futher. */
3163 return (1);
3164 }
3165 /* Ok so far lets munge through the rest of the packet */
3166 mat = in_initpkt;
3167 err_at = 0;
3168 sa_touse = NULL;
3169 offset += sizeof(struct sctp_init_chunk);
3170 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
3171 while (phdr) {
3172 ptype = ntohs(phdr->param_type);
3173 plen = ntohs(phdr->param_length);
3174 if (ptype == SCTP_IPV4_ADDRESS) {
3175 struct sctp_ipv4addr_param *p4, p4_buf;
3176
3177 phdr = sctp_get_next_param(mat, offset,
3178 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
3179 if (plen != sizeof(struct sctp_ipv4addr_param) ||
3180 phdr == NULL) {
3181 return (1);
3182 }
3183 p4 = (struct sctp_ipv4addr_param *)phdr;
3184 sin4.sin_addr.s_addr = p4->addr;
3185 sa_touse = (struct sockaddr *)&sin4;
3186 } else if (ptype == SCTP_IPV6_ADDRESS) {
3187 struct sctp_ipv6addr_param *p6, p6_buf;
3188
3189 phdr = sctp_get_next_param(mat, offset,
3190 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
3191 if (plen != sizeof(struct sctp_ipv6addr_param) ||
3192 phdr == NULL) {
3193 return (1);
3194 }
3195 p6 = (struct sctp_ipv6addr_param *)phdr;
3196 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
3197 sizeof(p6->addr));
3198 sa_touse = (struct sockaddr *)&sin4;
3199 }
3200 if (sa_touse) {
3201 /* ok, sa_touse points to one to check */
3202 fnd = 0;
3203 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3204 sa = (struct sockaddr *)&net->ro._l_addr;
3205 if (sa->sa_family != sa_touse->sa_family) {
3206 continue;
3207 }
3208 if (sa->sa_family == AF_INET) {
3209 sa4 = (struct sockaddr_in *)sa;
3210 if (sa4->sin_addr.s_addr ==
3211 sin4.sin_addr.s_addr) {
3212 fnd = 1;
3213 break;
3214 }
3215 } else if (sa->sa_family == AF_INET6) {
3216 sa6 = (struct sockaddr_in6 *)sa;
3217 if (SCTP6_ARE_ADDR_EQUAL(
3218 &sa6->sin6_addr, &sin6.sin6_addr)) {
3219 fnd = 1;
3220 break;
3221 }
3222 }
3223 }
3224 if (!fnd) {
3225 /* New addr added! no need to look further */
3226 return (1);
3227 }
3228 }
3229 offset += SCTP_SIZE32(plen);
3230 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
3231 }
3232 return (0);
3233}
3234
3235/*
3236 * Given a MBUF chain that was sent into us containing an INIT. Build a
3237 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
3238 * a pullup to include IPv6/4header, SCTP header and initial part of INIT
3239 * message (i.e. the struct sctp_init_msg).
3240 */
3241void
3242sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3243 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
3244 struct sctp_init_chunk *init_chk)
3245{
3246 struct sctp_association *asoc;
3247 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last;
3248 struct sctp_init_msg *initackm_out;
3249 struct sctp_ecn_supported_param *ecn;
3250 struct sctp_prsctp_supported_param *prsctp;
3251 struct sctp_ecn_nonce_supported_param *ecn_nonce;
3252 struct sctp_supported_chunk_types_param *pr_supported;
3253 struct sockaddr_storage store;
3254 struct sockaddr_in *sin;
3255 struct sockaddr_in6 *sin6;
3256 struct route *ro;
3257 struct ip *iph;
3258 struct ip6_hdr *ip6;
3259 struct sockaddr *to;
3260 struct sctp_state_cookie stc;
3261 struct sctp_nets *net = NULL;
3262 int cnt_inits_to = 0;
3263 uint16_t his_limit, i_want;
3264 int abort_flag, padval, sz_of;
3265 int num_ext;
3266 int p_len;
3775 net->src_addr_selected = 0;
3776 }
3777 if (ro->ro_rt != NULL) {
3778 if (ro->ro_rt->rt_rmx.rmx_mtu &&
3779 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
3780 sctp_mtu_size_reset(inp,
3781 &stcb->asoc,
3782 ro->ro_rt->rt_rmx.rmx_mtu);
3783 }
3784 } else if (ifp) {
3785 if (ND_IFINFO(ifp)->linkmtu &&
3786 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
3787 sctp_mtu_size_reset(inp,
3788 &stcb->asoc,
3789 ND_IFINFO(ifp)->linkmtu);
3790 }
3791 }
3792 }
3793 return (ret);
3794 }
3795#endif
3796 else {
3797#ifdef SCTP_DEBUG
3798 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
3799 printf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family);
3800 }
3801#endif
3802 sctp_m_freem(m);
3803 return (EFAULT);
3804 }
3805}
3806
3807
3808void
3809sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
3810{
3811 struct mbuf *m, *m_at, *m_last;
3812 struct sctp_nets *net;
3813 struct sctp_init_msg *initm;
3814 struct sctp_supported_addr_param *sup_addr;
3815 struct sctp_ecn_supported_param *ecn;
3816 struct sctp_prsctp_supported_param *prsctp;
3817 struct sctp_ecn_nonce_supported_param *ecn_nonce;
3818 struct sctp_supported_chunk_types_param *pr_supported;
3819 int cnt_inits_to = 0;
3820 int padval, ret;
3821 int num_ext;
3822 int p_len;
3823
3824 /* INIT's always go to the primary (and usually ONLY address) */
3825 m_last = NULL;
3826 net = stcb->asoc.primary_destination;
3827 if (net == NULL) {
3828 net = TAILQ_FIRST(&stcb->asoc.nets);
3829 if (net == NULL) {
3830 /* TSNH */
3831 return;
3832 }
3833 /* we confirm any address we send an INIT to */
3834 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
3835 sctp_set_primary_addr(stcb, NULL, net);
3836 } else {
3837 /* we confirm any address we send an INIT to */
3838 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
3839 }
3840#ifdef SCTP_DEBUG
3841 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3842 printf("Sending INIT\n");
3843 }
3844#endif
3845 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
3846 /*
3847 * special hook, if we are sending to link local it will not
3848 * show up in our private address count.
3849 */
3850 struct sockaddr_in6 *sin6l;
3851
3852 sin6l = &net->ro._l_addr.sin6;
3853 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
3854 cnt_inits_to = 1;
3855 }
3856 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3857 /* This case should not happen */
3858 return;
3859 }
3860 /* start the INIT timer */
3861 if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) {
3862 /* we are hosed since I can't start the INIT timer? */
3863 return;
3864 }
3865 m = sctp_get_mbuf_for_msg(MCLBYTES, 1, M_DONTWAIT, 1, MT_DATA);
3866 if (m == NULL) {
3867 /* No memory, INIT timer will re-attempt. */
3868 return;
3869 }
3870 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
3871 /* Now lets put the SCTP header in place */
3872 initm = mtod(m, struct sctp_init_msg *);
3873 initm->sh.src_port = inp->sctp_lport;
3874 initm->sh.dest_port = stcb->rport;
3875 initm->sh.v_tag = 0;
3876 initm->sh.checksum = 0; /* calculate later */
3877 /* now the chunk header */
3878 initm->msg.ch.chunk_type = SCTP_INITIATION;
3879 initm->msg.ch.chunk_flags = 0;
3880 /* fill in later from mbuf we build */
3881 initm->msg.ch.chunk_length = 0;
3882 /* place in my tag */
3883 initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
3884 /* set up some of the credits. */
3885 initm->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat,
3886 SCTP_MINIMAL_RWND));
3887
3888 initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
3889 initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
3890 initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
3891 /* now the address restriction */
3892 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
3893 sizeof(*initm));
3894 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
3895 /* we support 2 types IPv6/IPv4 */
3896 sup_addr->ph.param_length = htons(sizeof(*sup_addr) +
3897 sizeof(uint16_t));
3898 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
3899 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
3900 SCTP_BUF_LEN(m) += sizeof(*sup_addr) + sizeof(uint16_t);
3901
3902 if (inp->sctp_ep.adaptation_layer_indicator) {
3903 struct sctp_adaptation_layer_indication *ali;
3904
3905 ali = (struct sctp_adaptation_layer_indication *)(
3906 (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
3907 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
3908 ali->ph.param_length = htons(sizeof(*ali));
3909 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
3910 SCTP_BUF_LEN(m) += sizeof(*ali);
3911 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
3912 sizeof(*ali));
3913 } else {
3914 ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr +
3915 sizeof(*sup_addr) + sizeof(uint16_t));
3916 }
3917
3918 /* now any cookie time extensions */
3919 if (stcb->asoc.cookie_preserve_req) {
3920 struct sctp_cookie_perserve_param *cookie_preserve;
3921
3922 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
3923 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
3924 cookie_preserve->ph.param_length = htons(
3925 sizeof(*cookie_preserve));
3926 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
3927 SCTP_BUF_LEN(m) += sizeof(*cookie_preserve);
3928 ecn = (struct sctp_ecn_supported_param *)(
3929 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
3930 stcb->asoc.cookie_preserve_req = 0;
3931 }
3932 /* ECN parameter */
3933 if (sctp_ecn_enable == 1) {
3934 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
3935 ecn->ph.param_length = htons(sizeof(*ecn));
3936 SCTP_BUF_LEN(m) += sizeof(*ecn);
3937 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
3938 sizeof(*ecn));
3939 } else {
3940 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
3941 }
3942 /* And now tell the peer we do pr-sctp */
3943 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
3944 prsctp->ph.param_length = htons(sizeof(*prsctp));
3945 SCTP_BUF_LEN(m) += sizeof(*prsctp);
3946
3947 /* And now tell the peer we do all the extensions */
3948 pr_supported = (struct sctp_supported_chunk_types_param *)
3949 ((caddr_t)prsctp + sizeof(*prsctp));
3950 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
3951 num_ext = 0;
3952 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
3953 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
3954 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
3955 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
3956 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
3957 if (!sctp_auth_disable)
3958 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
3959 p_len = sizeof(*pr_supported) + num_ext;
3960 pr_supported->ph.param_length = htons(p_len);
3961 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
3962 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
3963
3964 /* ECN nonce: And now tell the peer we support ECN nonce */
3965 if (sctp_ecn_nonce) {
3966 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
3967 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
3968 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
3969 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
3970 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
3971 }
3972 /* add authentication parameters */
3973 if (!sctp_auth_disable) {
3974 struct sctp_auth_random *random;
3975 struct sctp_auth_hmac_algo *hmacs;
3976 struct sctp_auth_chunk_list *chunks;
3977
3978 /* attach RANDOM parameter, if available */
3979 if (stcb->asoc.authinfo.random != NULL) {
3980 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
3981 p_len = sizeof(*random) + stcb->asoc.authinfo.random_len;
3982#ifdef SCTP_AUTH_DRAFT_04
3983 random->ph.param_type = htons(SCTP_RANDOM);
3984 random->ph.param_length = htons(p_len);
3985 bcopy(stcb->asoc.authinfo.random->key,
3986 random->random_data,
3987 stcb->asoc.authinfo.random_len);
3988#else
3989 /* random key already contains the header */
3990 bcopy(stcb->asoc.authinfo.random->key, random, p_len);
3991#endif
3992 /* zero out any padding required */
3993 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
3994 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
3995 }
3996 /* add HMAC_ALGO parameter */
3997 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
3998 p_len = sctp_serialize_hmaclist(stcb->asoc.local_hmacs,
3999 (uint8_t *) hmacs->hmac_ids);
4000 if (p_len > 0) {
4001 p_len += sizeof(*hmacs);
4002 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4003 hmacs->ph.param_length = htons(p_len);
4004 /* zero out any padding required */
4005 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4006 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4007 }
4008 /* add CHUNKS parameter */
4009 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4010 p_len = sctp_serialize_auth_chunks(stcb->asoc.local_auth_chunks,
4011 chunks->chunk_types);
4012 if (p_len > 0) {
4013 p_len += sizeof(*chunks);
4014 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4015 chunks->ph.param_length = htons(p_len);
4016 /* zero out any padding required */
4017 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4018 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4019 }
4020 }
4021 m_at = m;
4022 /* now the addresses */
4023 {
4024 struct sctp_scoping scp;
4025
4026 /*
4027 * To optimize this we could put the scoping stuff into a
4028 * structure and remove the individual uint8's from the
4029 * assoc structure. Then we could just pass in the address
4030 * within the stcb.. but for now this is a quick hack to get
4031 * the address stuff teased apart.
4032 */
4033 scp.ipv4_addr_legal = stcb->asoc.ipv4_addr_legal;
4034 scp.ipv6_addr_legal = stcb->asoc.ipv6_addr_legal;
4035 scp.loopback_scope = stcb->asoc.loopback_scope;
4036 scp.ipv4_local_scope = stcb->asoc.ipv4_local_scope;
4037 scp.local_scope = stcb->asoc.local_scope;
4038 scp.site_scope = stcb->asoc.site_scope;
4039
4040 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
4041 }
4042
4043
4044 /* calulate the size and update pkt header and chunk header */
4045 p_len = 0;
4046 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
4047 if (SCTP_BUF_NEXT(m_at) == NULL)
4048 m_last = m_at;
4049 p_len += SCTP_BUF_LEN(m_at);
4050 }
4051 initm->msg.ch.chunk_length = htons((p_len - sizeof(struct sctphdr)));
4052 /*
4053 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return
4054 * here since the timer will drive a retranmission.
4055 */
4056
4057 /* I don't expect this to execute but we will be safe here */
4058 padval = p_len % 4;
4059 if ((padval) && (m_last)) {
4060 /*
4061 * The compiler worries that m_last may not be set even
4062 * though I think it is impossible :-> however we add m_last
4063 * here just in case.
4064 */
4065 int ret;
4066
4067 ret = sctp_add_pad_tombuf(m_last, (4 - padval));
4068 if (ret) {
4069 /* Houston we have a problem, no space */
4070 sctp_m_freem(m);
4071 return;
4072 }
4073 p_len += padval;
4074 }
4075 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
4076 (struct sockaddr *)&net->ro._l_addr,
4077 m, 0, NULL, 0, 0, NULL, 0);
4078 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
4079 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
4080 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
4081}
4082
4083struct mbuf *
4084sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
4085 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp)
4086{
4087 /*
4088 * Given a mbuf containing an INIT or INIT-ACK with the param_offset
4089 * being equal to the beginning of the params i.e. (iphlen +
4090 * sizeof(struct sctp_init_msg) parse through the parameters to the
4091 * end of the mbuf verifying that all parameters are known.
4092 *
4093 * For unknown parameters build and return a mbuf with
4094 * UNRECOGNIZED_PARAMETER errors. If the flags indicate to stop
4095 * processing this chunk stop, and set *abort_processing to 1.
4096 *
4097 * By having param_offset be pre-set to where parameters begin it is
4098 * hoped that this routine may be reused in the future by new
4099 * features.
4100 */
4101 struct sctp_paramhdr *phdr, params;
4102
4103 struct mbuf *mat, *op_err;
4104 char tempbuf[SCTP_CHUNK_BUFFER_SIZE];
4105 int at, limit, pad_needed;
4106 uint16_t ptype, plen;
4107 int err_at;
4108
4109 *abort_processing = 0;
4110 mat = in_initpkt;
4111 err_at = 0;
4112 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
4113 at = param_offset;
4114 op_err = NULL;
4115
4116 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4117 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
4118 ptype = ntohs(phdr->param_type);
4119 plen = ntohs(phdr->param_length);
4120 limit -= SCTP_SIZE32(plen);
4121 if (plen < sizeof(struct sctp_paramhdr)) {
4122#ifdef SCTP_DEBUG
4123 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4124 printf("sctp_output.c:Impossible length in parameter < %d\n", plen);
4125 }
4126#endif
4127 *abort_processing = 1;
4128 break;
4129 }
4130 /*
4131 * All parameters for all chunks that we know/understand are
4132 * listed here. We process them other places and make
4133 * appropriate stop actions per the upper bits. However this
4134 * is the generic routine processor's can call to get back
4135 * an operr.. to either incorporate (init-ack) or send.
4136 */
4137 if ((ptype == SCTP_HEARTBEAT_INFO) ||
4138 (ptype == SCTP_IPV4_ADDRESS) ||
4139 (ptype == SCTP_IPV6_ADDRESS) ||
4140 (ptype == SCTP_STATE_COOKIE) ||
4141 (ptype == SCTP_UNRECOG_PARAM) ||
4142 (ptype == SCTP_COOKIE_PRESERVE) ||
4143 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
4144 (ptype == SCTP_PRSCTP_SUPPORTED) ||
4145 (ptype == SCTP_ADD_IP_ADDRESS) ||
4146 (ptype == SCTP_DEL_IP_ADDRESS) ||
4147 (ptype == SCTP_ECN_CAPABLE) ||
4148 (ptype == SCTP_ULP_ADAPTATION) ||
4149 (ptype == SCTP_ERROR_CAUSE_IND) ||
4150 (ptype == SCTP_RANDOM) ||
4151 (ptype == SCTP_CHUNK_LIST) ||
4152 (ptype == SCTP_CHUNK_LIST) ||
4153 (ptype == SCTP_SET_PRIM_ADDR) ||
4154 (ptype == SCTP_SUCCESS_REPORT) ||
4155 (ptype == SCTP_ULP_ADAPTATION) ||
4156 (ptype == SCTP_SUPPORTED_CHUNK_EXT) ||
4157 (ptype == SCTP_ECN_NONCE_SUPPORTED)
4158 ) {
4159 /* no skip it */
4160 at += SCTP_SIZE32(plen);
4161 } else if (ptype == SCTP_HOSTNAME_ADDRESS) {
4162 /* We can NOT handle HOST NAME addresses!! */
4163 int l_len;
4164
4165#ifdef SCTP_DEBUG
4166 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4167 printf("Can't handle hostname addresses.. abort processing\n");
4168 }
4169#endif
4170 *abort_processing = 1;
4171 if (op_err == NULL) {
4172 /* Ok need to try to get a mbuf */
4173 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4174 l_len += plen;
4175 l_len += sizeof(struct sctp_paramhdr);
4176 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4177 if (op_err) {
4178 SCTP_BUF_LEN(op_err) = 0;
4179 /*
4180 * pre-reserve space for ip and sctp
4181 * header and chunk hdr
4182 */
4183 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4184 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4185 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4186 }
4187 }
4188 if (op_err) {
4189 /* If we have space */
4190 struct sctp_paramhdr s;
4191
4192 if (err_at % 4) {
4193 uint32_t cpthis = 0;
4194
4195 pad_needed = 4 - (err_at % 4);
4196 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4197 err_at += pad_needed;
4198 }
4199 s.param_type = htons(SCTP_CAUSE_UNRESOLVABLE_ADDR);
4200 s.param_length = htons(sizeof(s) + plen);
4201 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4202 err_at += sizeof(s);
4203 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
4204 if (phdr == NULL) {
4205 sctp_m_freem(op_err);
4206 /*
4207 * we are out of memory but we still
4208 * need to have a look at what to do
4209 * (the system is in trouble
4210 * though).
4211 */
4212 return (NULL);
4213 }
4214 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4215 err_at += plen;
4216 }
4217 return (op_err);
4218 } else {
4219 /*
4220 * we do not recognize the parameter figure out what
4221 * we do.
4222 */
4223 if ((ptype & 0x4000) == 0x4000) {
4224 /* Report bit is set?? */
4225 if (op_err == NULL) {
4226 int l_len;
4227
4228 /* Ok need to try to get an mbuf */
4229 l_len = sizeof(struct ip6_hdr) + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
4230 l_len += plen;
4231 l_len += sizeof(struct sctp_paramhdr);
4232 op_err = sctp_get_mbuf_for_msg(l_len, 0, M_DONTWAIT, 1, MT_DATA);
4233 if (op_err) {
4234 SCTP_BUF_LEN(op_err) = 0;
4235 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
4236 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
4237 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
4238 }
4239 }
4240 if (op_err) {
4241 /* If we have space */
4242 struct sctp_paramhdr s;
4243
4244 if (err_at % 4) {
4245 uint32_t cpthis = 0;
4246
4247 pad_needed = 4 - (err_at % 4);
4248 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
4249 err_at += pad_needed;
4250 }
4251 s.param_type = htons(SCTP_UNRECOG_PARAM);
4252 s.param_length = htons(sizeof(s) + plen);
4253 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
4254 err_at += sizeof(s);
4255 if (plen > sizeof(tempbuf)) {
4256 plen = sizeof(tempbuf);
4257 }
4258 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
4259 if (phdr == NULL) {
4260 sctp_m_freem(op_err);
4261 /*
4262 * we are out of memory but
4263 * we still need to have a
4264 * look at what to do (the
4265 * system is in trouble
4266 * though).
4267 */
4268 goto more_processing;
4269 }
4270 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
4271 err_at += plen;
4272 }
4273 }
4274 more_processing:
4275 if ((ptype & 0x8000) == 0x0000) {
4276 return (op_err);
4277 } else {
4278 /* skip this chunk and continue processing */
4279 at += SCTP_SIZE32(plen);
4280 }
4281
4282 }
4283 phdr = sctp_get_next_param(mat, at, &params, sizeof(params));
4284 }
4285 return (op_err);
4286}
4287
4288static int
4289sctp_are_there_new_addresses(struct sctp_association *asoc,
4290 struct mbuf *in_initpkt, int iphlen, int offset)
4291{
4292 /*
4293 * Given a INIT packet, look through the packet to verify that there
4294 * are NO new addresses. As we go through the parameters add reports
4295 * of any un-understood parameters that require an error. Also we
4296 * must return (1) to drop the packet if we see a un-understood
4297 * parameter that tells us to drop the chunk.
4298 */
4299 struct sockaddr_in sin4, *sa4;
4300 struct sockaddr_in6 sin6, *sa6;
4301 struct sockaddr *sa_touse;
4302 struct sockaddr *sa;
4303 struct sctp_paramhdr *phdr, params;
4304 struct ip *iph;
4305 struct mbuf *mat;
4306 uint16_t ptype, plen;
4307 int err_at;
4308 uint8_t fnd;
4309 struct sctp_nets *net;
4310
4311 memset(&sin4, 0, sizeof(sin4));
4312 memset(&sin6, 0, sizeof(sin6));
4313 sin4.sin_family = AF_INET;
4314 sin4.sin_len = sizeof(sin4);
4315 sin6.sin6_family = AF_INET6;
4316 sin6.sin6_len = sizeof(sin6);
4317
4318 sa_touse = NULL;
4319 /* First what about the src address of the pkt ? */
4320 iph = mtod(in_initpkt, struct ip *);
4321 if (iph->ip_v == IPVERSION) {
4322 /* source addr is IPv4 */
4323 sin4.sin_addr = iph->ip_src;
4324 sa_touse = (struct sockaddr *)&sin4;
4325 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4326 /* source addr is IPv6 */
4327 struct ip6_hdr *ip6h;
4328
4329 ip6h = mtod(in_initpkt, struct ip6_hdr *);
4330 sin6.sin6_addr = ip6h->ip6_src;
4331 sa_touse = (struct sockaddr *)&sin6;
4332 } else {
4333 return (1);
4334 }
4335
4336 fnd = 0;
4337 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4338 sa = (struct sockaddr *)&net->ro._l_addr;
4339 if (sa->sa_family == sa_touse->sa_family) {
4340 if (sa->sa_family == AF_INET) {
4341 sa4 = (struct sockaddr_in *)sa;
4342 if (sa4->sin_addr.s_addr ==
4343 sin4.sin_addr.s_addr) {
4344 fnd = 1;
4345 break;
4346 }
4347 } else if (sa->sa_family == AF_INET6) {
4348 sa6 = (struct sockaddr_in6 *)sa;
4349 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr,
4350 &sin6.sin6_addr)) {
4351 fnd = 1;
4352 break;
4353 }
4354 }
4355 }
4356 }
4357 if (fnd == 0) {
4358 /* New address added! no need to look futher. */
4359 return (1);
4360 }
4361 /* Ok so far lets munge through the rest of the packet */
4362 mat = in_initpkt;
4363 err_at = 0;
4364 sa_touse = NULL;
4365 offset += sizeof(struct sctp_init_chunk);
4366 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4367 while (phdr) {
4368 ptype = ntohs(phdr->param_type);
4369 plen = ntohs(phdr->param_length);
4370 if (ptype == SCTP_IPV4_ADDRESS) {
4371 struct sctp_ipv4addr_param *p4, p4_buf;
4372
4373 phdr = sctp_get_next_param(mat, offset,
4374 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4375 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4376 phdr == NULL) {
4377 return (1);
4378 }
4379 p4 = (struct sctp_ipv4addr_param *)phdr;
4380 sin4.sin_addr.s_addr = p4->addr;
4381 sa_touse = (struct sockaddr *)&sin4;
4382 } else if (ptype == SCTP_IPV6_ADDRESS) {
4383 struct sctp_ipv6addr_param *p6, p6_buf;
4384
4385 phdr = sctp_get_next_param(mat, offset,
4386 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4387 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4388 phdr == NULL) {
4389 return (1);
4390 }
4391 p6 = (struct sctp_ipv6addr_param *)phdr;
4392 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4393 sizeof(p6->addr));
4394 sa_touse = (struct sockaddr *)&sin4;
4395 }
4396 if (sa_touse) {
4397 /* ok, sa_touse points to one to check */
4398 fnd = 0;
4399 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4400 sa = (struct sockaddr *)&net->ro._l_addr;
4401 if (sa->sa_family != sa_touse->sa_family) {
4402 continue;
4403 }
4404 if (sa->sa_family == AF_INET) {
4405 sa4 = (struct sockaddr_in *)sa;
4406 if (sa4->sin_addr.s_addr ==
4407 sin4.sin_addr.s_addr) {
4408 fnd = 1;
4409 break;
4410 }
4411 } else if (sa->sa_family == AF_INET6) {
4412 sa6 = (struct sockaddr_in6 *)sa;
4413 if (SCTP6_ARE_ADDR_EQUAL(
4414 &sa6->sin6_addr, &sin6.sin6_addr)) {
4415 fnd = 1;
4416 break;
4417 }
4418 }
4419 }
4420 if (!fnd) {
4421 /* New addr added! no need to look further */
4422 return (1);
4423 }
4424 }
4425 offset += SCTP_SIZE32(plen);
4426 phdr = sctp_get_next_param(mat, offset, &params, sizeof(params));
4427 }
4428 return (0);
4429}
4430
4431/*
4432 * Given a MBUF chain that was sent into us containing an INIT. Build a
4433 * INIT-ACK with COOKIE and send back. We assume that the in_initpkt has done
4434 * a pullup to include IPv6/4header, SCTP header and initial part of INIT
4435 * message (i.e. the struct sctp_init_msg).
4436 */
4437void
4438sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
4439 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
4440 struct sctp_init_chunk *init_chk)
4441{
4442 struct sctp_association *asoc;
4443 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last;
4444 struct sctp_init_msg *initackm_out;
4445 struct sctp_ecn_supported_param *ecn;
4446 struct sctp_prsctp_supported_param *prsctp;
4447 struct sctp_ecn_nonce_supported_param *ecn_nonce;
4448 struct sctp_supported_chunk_types_param *pr_supported;
4449 struct sockaddr_storage store;
4450 struct sockaddr_in *sin;
4451 struct sockaddr_in6 *sin6;
4452 struct route *ro;
4453 struct ip *iph;
4454 struct ip6_hdr *ip6;
4455 struct sockaddr *to;
4456 struct sctp_state_cookie stc;
4457 struct sctp_nets *net = NULL;
4458 int cnt_inits_to = 0;
4459 uint16_t his_limit, i_want;
4460 int abort_flag, padval, sz_of;
4461 int num_ext;
4462 int p_len;
4463 uint32_t vrf_id;
3267
4464
4465 vrf_id = SCTP_DEFAULT_VRFID;
3268 if (stcb) {
3269 asoc = &stcb->asoc;
3270 } else {
3271 asoc = NULL;
3272 }
3273 m_last = NULL;
3274 if ((asoc != NULL) &&
3275 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
3276 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
3277 /* new addresses, out of here in non-cookie-wait states */
3278 /*
3279 * Send a ABORT, we don't add the new address error clause
3280 * though we even set the T bit and copy in the 0 tag.. this
3281 * looks no different than if no listener was present.
3282 */
3283 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
3284 return;
3285 }
3286 abort_flag = 0;
3287 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
3288 (offset + sizeof(struct sctp_init_chunk)),
3289 &abort_flag, (struct sctp_chunkhdr *)init_chk);
3290 if (abort_flag) {
3291 sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err);
3292 return;
3293 }
3294 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3295 if (m == NULL) {
3296 /* No memory, INIT timer will re-attempt. */
3297 if (op_err)
3298 sctp_m_freem(op_err);
3299 return;
3300 }
3301 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
3302
3303 /* the time I built cookie */
3304 SCTP_GETTIME_TIMEVAL(&stc.time_entered);
3305
3306 /* populate any tie tags */
3307 if (asoc != NULL) {
3308 /* unlock before tag selections */
3309 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
3310 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
3311 stc.cookie_life = asoc->cookie_life;
3312 net = asoc->primary_destination;
3313 } else {
3314 stc.tie_tag_my_vtag = 0;
3315 stc.tie_tag_peer_vtag = 0;
3316 /* life I will award this cookie */
3317 stc.cookie_life = inp->sctp_ep.def_cookie_life;
3318 }
3319
3320 /* copy in the ports for later check */
3321 stc.myport = sh->dest_port;
3322 stc.peerport = sh->src_port;
3323
3324 /*
3325 * If we wanted to honor cookie life extentions, we would add to
3326 * stc.cookie_life. For now we should NOT honor any extension
3327 */
3328 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
3329 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3330 struct inpcb *in_inp;
3331
3332 /* Its a V6 socket */
3333 in_inp = (struct inpcb *)inp;
3334 stc.ipv6_addr_legal = 1;
3335 /* Now look at the binding flag to see if V4 will be legal */
3336 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
3337 stc.ipv4_addr_legal = 1;
3338 } else {
3339 /* V4 addresses are NOT legal on the association */
3340 stc.ipv4_addr_legal = 0;
3341 }
3342 } else {
3343 /* Its a V4 socket, no - V6 */
3344 stc.ipv4_addr_legal = 1;
3345 stc.ipv6_addr_legal = 0;
3346 }
3347
3348#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
3349 stc.ipv4_scope = 1;
3350#else
3351 stc.ipv4_scope = 0;
3352#endif
3353 /* now for scope setup */
3354 memset((caddr_t)&store, 0, sizeof(store));
3355 sin = (struct sockaddr_in *)&store;
3356 sin6 = (struct sockaddr_in6 *)&store;
3357 if (net == NULL) {
3358 to = (struct sockaddr *)&store;
3359 iph = mtod(init_pkt, struct ip *);
3360 if (iph->ip_v == IPVERSION) {
4466 if (stcb) {
4467 asoc = &stcb->asoc;
4468 } else {
4469 asoc = NULL;
4470 }
4471 m_last = NULL;
4472 if ((asoc != NULL) &&
4473 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
4474 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
4475 /* new addresses, out of here in non-cookie-wait states */
4476 /*
4477 * Send a ABORT, we don't add the new address error clause
4478 * though we even set the T bit and copy in the 0 tag.. this
4479 * looks no different than if no listener was present.
4480 */
4481 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
4482 return;
4483 }
4484 abort_flag = 0;
4485 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
4486 (offset + sizeof(struct sctp_init_chunk)),
4487 &abort_flag, (struct sctp_chunkhdr *)init_chk);
4488 if (abort_flag) {
4489 sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err);
4490 return;
4491 }
4492 m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
4493 if (m == NULL) {
4494 /* No memory, INIT timer will re-attempt. */
4495 if (op_err)
4496 sctp_m_freem(op_err);
4497 return;
4498 }
4499 SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
4500
4501 /* the time I built cookie */
4502 SCTP_GETTIME_TIMEVAL(&stc.time_entered);
4503
4504 /* populate any tie tags */
4505 if (asoc != NULL) {
4506 /* unlock before tag selections */
4507 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
4508 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
4509 stc.cookie_life = asoc->cookie_life;
4510 net = asoc->primary_destination;
4511 } else {
4512 stc.tie_tag_my_vtag = 0;
4513 stc.tie_tag_peer_vtag = 0;
4514 /* life I will award this cookie */
4515 stc.cookie_life = inp->sctp_ep.def_cookie_life;
4516 }
4517
4518 /* copy in the ports for later check */
4519 stc.myport = sh->dest_port;
4520 stc.peerport = sh->src_port;
4521
4522 /*
4523 * If we wanted to honor cookie life extentions, we would add to
4524 * stc.cookie_life. For now we should NOT honor any extension
4525 */
4526 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
4527 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4528 struct inpcb *in_inp;
4529
4530 /* Its a V6 socket */
4531 in_inp = (struct inpcb *)inp;
4532 stc.ipv6_addr_legal = 1;
4533 /* Now look at the binding flag to see if V4 will be legal */
4534 if (SCTP_IPV6_V6ONLY(in_inp) == 0) {
4535 stc.ipv4_addr_legal = 1;
4536 } else {
4537 /* V4 addresses are NOT legal on the association */
4538 stc.ipv4_addr_legal = 0;
4539 }
4540 } else {
4541 /* Its a V4 socket, no - V6 */
4542 stc.ipv4_addr_legal = 1;
4543 stc.ipv6_addr_legal = 0;
4544 }
4545
4546#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
4547 stc.ipv4_scope = 1;
4548#else
4549 stc.ipv4_scope = 0;
4550#endif
4551 /* now for scope setup */
4552 memset((caddr_t)&store, 0, sizeof(store));
4553 sin = (struct sockaddr_in *)&store;
4554 sin6 = (struct sockaddr_in6 *)&store;
4555 if (net == NULL) {
4556 to = (struct sockaddr *)&store;
4557 iph = mtod(init_pkt, struct ip *);
4558 if (iph->ip_v == IPVERSION) {
3361 struct in_addr addr;
4559 struct sctp_ifa *addr;
3362 struct route iproute;
3363
3364 sin->sin_family = AF_INET;
3365 sin->sin_len = sizeof(struct sockaddr_in);
3366 sin->sin_port = sh->src_port;
3367 sin->sin_addr = iph->ip_src;
3368 /* lookup address */
3369 stc.address[0] = sin->sin_addr.s_addr;
3370 stc.address[1] = 0;
3371 stc.address[2] = 0;
3372 stc.address[3] = 0;
3373 stc.addr_type = SCTP_IPV4_ADDRESS;
3374 /* local from address */
3375 memset(&iproute, 0, sizeof(iproute));
3376 ro = &iproute;
3377 memcpy(&ro->ro_dst, sin, sizeof(*sin));
4560 struct route iproute;
4561
4562 sin->sin_family = AF_INET;
4563 sin->sin_len = sizeof(struct sockaddr_in);
4564 sin->sin_port = sh->src_port;
4565 sin->sin_addr = iph->ip_src;
4566 /* lookup address */
4567 stc.address[0] = sin->sin_addr.s_addr;
4568 stc.address[1] = 0;
4569 stc.address[2] = 0;
4570 stc.address[3] = 0;
4571 stc.addr_type = SCTP_IPV4_ADDRESS;
4572 /* local from address */
4573 memset(&iproute, 0, sizeof(iproute));
4574 ro = &iproute;
4575 memcpy(&ro->ro_dst, sin, sizeof(*sin));
3378 addr = sctp_ipv4_source_address_selection(inp, NULL,
3379 ro, NULL, 0);
4576 addr = sctp_source_address_selection(inp, NULL,
4577 ro, NULL, 0, vrf_id);
4578 if (addr == NULL)
4579 return;
4580
3380 if (ro->ro_rt) {
3381 RTFREE(ro->ro_rt);
4581 if (ro->ro_rt) {
4582 RTFREE(ro->ro_rt);
4583 ro->ro_rt = NULL;
3382 }
4584 }
3383 stc.laddress[0] = addr.s_addr;
4585 stc.laddress[0] = addr->address.sin.sin_addr.s_addr;
3384 stc.laddress[1] = 0;
3385 stc.laddress[2] = 0;
3386 stc.laddress[3] = 0;
3387 stc.laddr_type = SCTP_IPV4_ADDRESS;
3388 /* scope_id is only for v6 */
3389 stc.scope_id = 0;
3390#ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
3391 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
3392 stc.ipv4_scope = 1;
3393 }
3394#else
3395 stc.ipv4_scope = 1;
3396#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
3397 /* Must use the address in this case */
4586 stc.laddress[1] = 0;
4587 stc.laddress[2] = 0;
4588 stc.laddress[3] = 0;
4589 stc.laddr_type = SCTP_IPV4_ADDRESS;
4590 /* scope_id is only for v6 */
4591 stc.scope_id = 0;
4592#ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
4593 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
4594 stc.ipv4_scope = 1;
4595 }
4596#else
4597 stc.ipv4_scope = 1;
4598#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
4599 /* Must use the address in this case */
3398 if (sctp_is_address_on_local_host((struct sockaddr *)sin)) {
4600 if (sctp_is_address_on_local_host((struct sockaddr *)sin, vrf_id)) {
3399 stc.loopback_scope = 1;
3400 stc.ipv4_scope = 1;
3401 stc.site_scope = 1;
4601 stc.loopback_scope = 1;
4602 stc.ipv4_scope = 1;
4603 stc.site_scope = 1;
3402 stc.local_scope = 1;
4604 stc.local_scope = 0;
3403 }
3404 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4605 }
4606 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3405 struct in6_addr addr;
4607 struct sctp_ifa *addr;
3406
3407 struct route_in6 iproute6;
3408
3409 ip6 = mtod(init_pkt, struct ip6_hdr *);
3410 sin6->sin6_family = AF_INET6;
3411 sin6->sin6_len = sizeof(struct sockaddr_in6);
3412 sin6->sin6_port = sh->src_port;
3413 sin6->sin6_addr = ip6->ip6_src;
3414 /* lookup address */
3415 memcpy(&stc.address, &sin6->sin6_addr,
3416 sizeof(struct in6_addr));
3417 sin6->sin6_scope_id = 0;
3418 stc.addr_type = SCTP_IPV6_ADDRESS;
3419 stc.scope_id = 0;
4608
4609 struct route_in6 iproute6;
4610
4611 ip6 = mtod(init_pkt, struct ip6_hdr *);
4612 sin6->sin6_family = AF_INET6;
4613 sin6->sin6_len = sizeof(struct sockaddr_in6);
4614 sin6->sin6_port = sh->src_port;
4615 sin6->sin6_addr = ip6->ip6_src;
4616 /* lookup address */
4617 memcpy(&stc.address, &sin6->sin6_addr,
4618 sizeof(struct in6_addr));
4619 sin6->sin6_scope_id = 0;
4620 stc.addr_type = SCTP_IPV6_ADDRESS;
4621 stc.scope_id = 0;
3420 if (sctp_is_address_on_local_host((struct sockaddr *)sin6)) {
4622 if (sctp_is_address_on_local_host((struct sockaddr *)sin6, vrf_id)) {
3421 stc.loopback_scope = 1;
4623 stc.loopback_scope = 1;
3422 stc.local_scope = 1;
4624 stc.local_scope = 0;
3423 stc.site_scope = 1;
3424 stc.ipv4_scope = 1;
3425 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
3426 /*
3427 * If the new destination is a LINK_LOCAL we
3428 * must have common both site and local
3429 * scope. Don't set local scope though since
3430 * we must depend on the source to be added
3431 * implicitly. We cannot assure just because
3432 * we share one link that all links are
3433 * common.
3434 */
3435 stc.local_scope = 0;
3436 stc.site_scope = 1;
3437 stc.ipv4_scope = 1;
3438 /*
3439 * we start counting for the private address
3440 * stuff at 1. since the link local we
3441 * source from won't show up in our scoped
3442 * count.
3443 */
3444 cnt_inits_to = 1;
3445 /* pull out the scope_id from incoming pkt */
3446 /* FIX ME: does this have scope from rcvif? */
3447 (void)sa6_recoverscope(sin6);
3448
3449 sa6_embedscope(sin6, ip6_use_defzone);
3450 stc.scope_id = sin6->sin6_scope_id;
3451 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
3452 /*
3453 * If the new destination is SITE_LOCAL then
3454 * we must have site scope in common.
3455 */
3456 stc.site_scope = 1;
3457 }
3458 /* local from address */
3459 memset(&iproute6, 0, sizeof(iproute6));
3460 ro = (struct route *)&iproute6;
3461 memcpy(&ro->ro_dst, sin6, sizeof(*sin6));
4625 stc.site_scope = 1;
4626 stc.ipv4_scope = 1;
4627 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
4628 /*
4629 * If the new destination is a LINK_LOCAL we
4630 * must have common both site and local
4631 * scope. Don't set local scope though since
4632 * we must depend on the source to be added
4633 * implicitly. We cannot assure just because
4634 * we share one link that all links are
4635 * common.
4636 */
4637 stc.local_scope = 0;
4638 stc.site_scope = 1;
4639 stc.ipv4_scope = 1;
4640 /*
4641 * we start counting for the private address
4642 * stuff at 1. since the link local we
4643 * source from won't show up in our scoped
4644 * count.
4645 */
4646 cnt_inits_to = 1;
4647 /* pull out the scope_id from incoming pkt */
4648 /* FIX ME: does this have scope from rcvif? */
4649 (void)sa6_recoverscope(sin6);
4650
4651 sa6_embedscope(sin6, ip6_use_defzone);
4652 stc.scope_id = sin6->sin6_scope_id;
4653 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
4654 /*
4655 * If the new destination is SITE_LOCAL then
4656 * we must have site scope in common.
4657 */
4658 stc.site_scope = 1;
4659 }
4660 /* local from address */
4661 memset(&iproute6, 0, sizeof(iproute6));
4662 ro = (struct route *)&iproute6;
4663 memcpy(&ro->ro_dst, sin6, sizeof(*sin6));
3462 addr = sctp_ipv6_source_address_selection(inp, NULL,
3463 ro, NULL, 0);
4664 addr = sctp_source_address_selection(inp, NULL,
4665 ro, NULL, 0, vrf_id);
4666 if (addr == NULL)
4667 return;
4668
3464 if (ro->ro_rt) {
3465 RTFREE(ro->ro_rt);
4669 if (ro->ro_rt) {
4670 RTFREE(ro->ro_rt);
4671 ro->ro_rt = NULL;
3466 }
4672 }
3467 memcpy(&stc.laddress, &addr, sizeof(struct in6_addr));
4673 memcpy(&stc.laddress, &addr->address.sin6.sin6_addr, sizeof(struct in6_addr));
3468 stc.laddr_type = SCTP_IPV6_ADDRESS;
3469 }
3470 } else {
3471 /* set the scope per the existing tcb */
3472 struct sctp_nets *lnet;
3473
3474 stc.loopback_scope = asoc->loopback_scope;
3475 stc.ipv4_scope = asoc->ipv4_local_scope;
3476 stc.site_scope = asoc->site_scope;
3477 stc.local_scope = asoc->local_scope;
3478 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3479 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
3480 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
3481 /*
3482 * if we have a LL address, start
3483 * counting at 1.
3484 */
3485 cnt_inits_to = 1;
3486 }
3487 }
3488 }
3489
3490 /* use the net pointer */
3491 to = (struct sockaddr *)&net->ro._l_addr;
3492 if (to->sa_family == AF_INET) {
3493 sin = (struct sockaddr_in *)to;
3494 stc.address[0] = sin->sin_addr.s_addr;
3495 stc.address[1] = 0;
3496 stc.address[2] = 0;
3497 stc.address[3] = 0;
3498 stc.addr_type = SCTP_IPV4_ADDRESS;
3499 if (net->src_addr_selected == 0) {
3500 /*
3501 * strange case here, the INIT should have
3502 * did the selection.
3503 */
4674 stc.laddr_type = SCTP_IPV6_ADDRESS;
4675 }
4676 } else {
4677 /* set the scope per the existing tcb */
4678 struct sctp_nets *lnet;
4679
4680 stc.loopback_scope = asoc->loopback_scope;
4681 stc.ipv4_scope = asoc->ipv4_local_scope;
4682 stc.site_scope = asoc->site_scope;
4683 stc.local_scope = asoc->local_scope;
4684 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
4685 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
4686 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
4687 /*
4688 * if we have a LL address, start
4689 * counting at 1.
4690 */
4691 cnt_inits_to = 1;
4692 }
4693 }
4694 }
4695
4696 /* use the net pointer */
4697 to = (struct sockaddr *)&net->ro._l_addr;
4698 if (to->sa_family == AF_INET) {
4699 sin = (struct sockaddr_in *)to;
4700 stc.address[0] = sin->sin_addr.s_addr;
4701 stc.address[1] = 0;
4702 stc.address[2] = 0;
4703 stc.address[3] = 0;
4704 stc.addr_type = SCTP_IPV4_ADDRESS;
4705 if (net->src_addr_selected == 0) {
4706 /*
4707 * strange case here, the INIT should have
4708 * did the selection.
4709 */
3504 net->ro._s_addr.sin.sin_addr =
3505 sctp_ipv4_source_address_selection(inp,
3506 stcb, (struct route *)&net->ro, net, 0);
4710 net->ro._s_addr = sctp_source_address_selection(inp,
4711 stcb, (struct route *)&net->ro,
4712 net, 0, vrf_id);
4713 if (net->ro._s_addr == NULL)
4714 return;
4715
3507 net->src_addr_selected = 1;
3508
3509 }
4716 net->src_addr_selected = 1;
4717
4718 }
3510 stc.laddress[0] = net->ro._s_addr.sin.sin_addr.s_addr;
4719 stc.laddress[0] = net->ro._s_addr->address.sin.sin_addr.s_addr;
3511 stc.laddress[1] = 0;
3512 stc.laddress[2] = 0;
3513 stc.laddress[3] = 0;
3514 stc.laddr_type = SCTP_IPV4_ADDRESS;
3515 } else if (to->sa_family == AF_INET6) {
3516 sin6 = (struct sockaddr_in6 *)to;
3517 memcpy(&stc.address, &sin6->sin6_addr,
3518 sizeof(struct in6_addr));
3519 stc.addr_type = SCTP_IPV6_ADDRESS;
3520 if (net->src_addr_selected == 0) {
3521 /*
3522 * strange case here, the INIT should have
3523 * did the selection.
3524 */
4720 stc.laddress[1] = 0;
4721 stc.laddress[2] = 0;
4722 stc.laddress[3] = 0;
4723 stc.laddr_type = SCTP_IPV4_ADDRESS;
4724 } else if (to->sa_family == AF_INET6) {
4725 sin6 = (struct sockaddr_in6 *)to;
4726 memcpy(&stc.address, &sin6->sin6_addr,
4727 sizeof(struct in6_addr));
4728 stc.addr_type = SCTP_IPV6_ADDRESS;
4729 if (net->src_addr_selected == 0) {
4730 /*
4731 * strange case here, the INIT should have
4732 * did the selection.
4733 */
3525 net->ro._s_addr.sin6.sin6_addr =
3526 sctp_ipv6_source_address_selection(inp,
3527 stcb, (struct route *)&net->ro, net, 0);
4734 net->ro._s_addr = sctp_source_address_selection(inp,
4735 stcb, (struct route *)&net->ro,
4736 net, 0, vrf_id);
4737 if (net->ro._s_addr == NULL)
4738 return;
4739
3528 net->src_addr_selected = 1;
3529 }
4740 net->src_addr_selected = 1;
4741 }
3530 memcpy(&stc.laddress, &net->ro._l_addr.sin6.sin6_addr,
4742 memcpy(&stc.laddress, &net->ro._s_addr->address.sin6.sin6_addr,
3531 sizeof(struct in6_addr));
3532 stc.laddr_type = SCTP_IPV6_ADDRESS;
3533 }
3534 }
3535 /* Now lets put the SCTP header in place */
3536 initackm_out = mtod(m, struct sctp_init_msg *);
3537 initackm_out->sh.src_port = inp->sctp_lport;
3538 initackm_out->sh.dest_port = sh->src_port;
3539 initackm_out->sh.v_tag = init_chk->init.initiate_tag;
3540 /* Save it off for quick ref */
3541 stc.peers_vtag = init_chk->init.initiate_tag;
3542 initackm_out->sh.checksum = 0; /* calculate later */
3543 /* who are we */
3544 memcpy(stc.identification, SCTP_VERSION_STRING,
3545 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
3546 /* now the chunk header */
3547 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK;
3548 initackm_out->msg.ch.chunk_flags = 0;
3549 /* fill in later from mbuf we build */
3550 initackm_out->msg.ch.chunk_length = 0;
3551 /* place in my tag */
3552 if ((asoc != NULL) &&
3553 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
3554 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
3555 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
3556 /* re-use the v-tags and init-seq here */
3557 initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
3558 initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
3559 } else {
3560 uint32_t vtag;
3561
3562 if (asoc) {
3563 atomic_add_int(&asoc->refcnt, 1);
3564 SCTP_TCB_UNLOCK(stcb);
3565 vtag = sctp_select_a_tag(inp);
3566 initackm_out->msg.init.initiate_tag = htonl(vtag);
3567 /* get a TSN to use too */
3568 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
3569 SCTP_TCB_LOCK(stcb);
3570 atomic_add_int(&asoc->refcnt, -1);
3571 } else {
3572 vtag = sctp_select_a_tag(inp);
3573 initackm_out->msg.init.initiate_tag = htonl(vtag);
3574 /* get a TSN to use too */
3575 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
3576 }
3577 }
3578 /* save away my tag to */
3579 stc.my_vtag = initackm_out->msg.init.initiate_tag;
3580
3581 /* set up some of the credits. */
3582 initackm_out->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND));
3583 /* set what I want */
3584 his_limit = ntohs(init_chk->init.num_inbound_streams);
3585 /* choose what I want */
3586 if (asoc != NULL) {
3587 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
3588 i_want = asoc->streamoutcnt;
3589 } else {
3590 i_want = inp->sctp_ep.pre_open_stream_count;
3591 }
3592 } else {
3593 i_want = inp->sctp_ep.pre_open_stream_count;
3594 }
3595 if (his_limit < i_want) {
3596 /* I Want more :< */
3597 initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
3598 } else {
3599 /* I can have what I want :> */
3600 initackm_out->msg.init.num_outbound_streams = htons(i_want);
3601 }
3602 /* tell him his limt. */
3603 initackm_out->msg.init.num_inbound_streams =
3604 htons(inp->sctp_ep.max_open_streams_intome);
3605 /* setup the ECN pointer */
3606
3607 if (inp->sctp_ep.adaptation_layer_indicator) {
3608 struct sctp_adaptation_layer_indication *ali;
3609
3610 ali = (struct sctp_adaptation_layer_indication *)(
3611 (caddr_t)initackm_out + sizeof(*initackm_out));
3612 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
3613 ali->ph.param_length = htons(sizeof(*ali));
3614 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
3615 SCTP_BUF_LEN(m) += sizeof(*ali);
3616 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
3617 sizeof(*ali));
3618 } else {
3619 ecn = (struct sctp_ecn_supported_param *)(
3620 (caddr_t)initackm_out + sizeof(*initackm_out));
3621 }
3622
3623 /* ECN parameter */
3624 if (sctp_ecn_enable == 1) {
3625 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
3626 ecn->ph.param_length = htons(sizeof(*ecn));
3627 SCTP_BUF_LEN(m) += sizeof(*ecn);
3628
3629 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
3630 sizeof(*ecn));
3631 } else {
3632 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
3633 }
3634 /* And now tell the peer we do pr-sctp */
3635 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
3636 prsctp->ph.param_length = htons(sizeof(*prsctp));
3637 SCTP_BUF_LEN(m) += sizeof(*prsctp);
3638
3639 /* And now tell the peer we do all the extensions */
3640 pr_supported = (struct sctp_supported_chunk_types_param *)
3641 ((caddr_t)prsctp + sizeof(*prsctp));
3642
3643 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
3644 num_ext = 0;
3645 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
3646 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
3647 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
3648 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
3649 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
3650 if (!sctp_auth_disable)
3651 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
3652 p_len = sizeof(*pr_supported) + num_ext;
3653 pr_supported->ph.param_length = htons(p_len);
3654 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
3655 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
3656
3657 /* ECN nonce: And now tell the peer we support ECN nonce */
3658 if (sctp_ecn_nonce) {
3659 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
3660 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
3661 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
3662 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
3663 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
3664 }
3665 /* add authentication parameters */
3666 if (!sctp_auth_disable) {
3667 struct sctp_auth_random *random;
3668 struct sctp_auth_hmac_algo *hmacs;
3669 struct sctp_auth_chunk_list *chunks;
3670 uint16_t random_len;
3671
3672 /* generate and add RANDOM parameter */
4743 sizeof(struct in6_addr));
4744 stc.laddr_type = SCTP_IPV6_ADDRESS;
4745 }
4746 }
4747 /* Now lets put the SCTP header in place */
4748 initackm_out = mtod(m, struct sctp_init_msg *);
4749 initackm_out->sh.src_port = inp->sctp_lport;
4750 initackm_out->sh.dest_port = sh->src_port;
4751 initackm_out->sh.v_tag = init_chk->init.initiate_tag;
4752 /* Save it off for quick ref */
4753 stc.peers_vtag = init_chk->init.initiate_tag;
4754 initackm_out->sh.checksum = 0; /* calculate later */
4755 /* who are we */
4756 memcpy(stc.identification, SCTP_VERSION_STRING,
4757 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
4758 /* now the chunk header */
4759 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK;
4760 initackm_out->msg.ch.chunk_flags = 0;
4761 /* fill in later from mbuf we build */
4762 initackm_out->msg.ch.chunk_length = 0;
4763 /* place in my tag */
4764 if ((asoc != NULL) &&
4765 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
4766 (SCTP_GET_STATE(asoc) == SCTP_STATE_INUSE) ||
4767 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
4768 /* re-use the v-tags and init-seq here */
4769 initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
4770 initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
4771 } else {
4772 uint32_t vtag;
4773
4774 if (asoc) {
4775 atomic_add_int(&asoc->refcnt, 1);
4776 SCTP_TCB_UNLOCK(stcb);
4777 vtag = sctp_select_a_tag(inp);
4778 initackm_out->msg.init.initiate_tag = htonl(vtag);
4779 /* get a TSN to use too */
4780 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
4781 SCTP_TCB_LOCK(stcb);
4782 atomic_add_int(&asoc->refcnt, -1);
4783 } else {
4784 vtag = sctp_select_a_tag(inp);
4785 initackm_out->msg.init.initiate_tag = htonl(vtag);
4786 /* get a TSN to use too */
4787 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
4788 }
4789 }
4790 /* save away my tag to */
4791 stc.my_vtag = initackm_out->msg.init.initiate_tag;
4792
4793 /* set up some of the credits. */
4794 initackm_out->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND));
4795 /* set what I want */
4796 his_limit = ntohs(init_chk->init.num_inbound_streams);
4797 /* choose what I want */
4798 if (asoc != NULL) {
4799 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
4800 i_want = asoc->streamoutcnt;
4801 } else {
4802 i_want = inp->sctp_ep.pre_open_stream_count;
4803 }
4804 } else {
4805 i_want = inp->sctp_ep.pre_open_stream_count;
4806 }
4807 if (his_limit < i_want) {
4808 /* I Want more :< */
4809 initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
4810 } else {
4811 /* I can have what I want :> */
4812 initackm_out->msg.init.num_outbound_streams = htons(i_want);
4813 }
4814 /* tell him his limt. */
4815 initackm_out->msg.init.num_inbound_streams =
4816 htons(inp->sctp_ep.max_open_streams_intome);
4817 /* setup the ECN pointer */
4818
4819 if (inp->sctp_ep.adaptation_layer_indicator) {
4820 struct sctp_adaptation_layer_indication *ali;
4821
4822 ali = (struct sctp_adaptation_layer_indication *)(
4823 (caddr_t)initackm_out + sizeof(*initackm_out));
4824 ali->ph.param_type = htons(SCTP_ULP_ADAPTATION);
4825 ali->ph.param_length = htons(sizeof(*ali));
4826 ali->indication = ntohl(inp->sctp_ep.adaptation_layer_indicator);
4827 SCTP_BUF_LEN(m) += sizeof(*ali);
4828 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
4829 sizeof(*ali));
4830 } else {
4831 ecn = (struct sctp_ecn_supported_param *)(
4832 (caddr_t)initackm_out + sizeof(*initackm_out));
4833 }
4834
4835 /* ECN parameter */
4836 if (sctp_ecn_enable == 1) {
4837 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
4838 ecn->ph.param_length = htons(sizeof(*ecn));
4839 SCTP_BUF_LEN(m) += sizeof(*ecn);
4840
4841 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
4842 sizeof(*ecn));
4843 } else {
4844 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
4845 }
4846 /* And now tell the peer we do pr-sctp */
4847 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
4848 prsctp->ph.param_length = htons(sizeof(*prsctp));
4849 SCTP_BUF_LEN(m) += sizeof(*prsctp);
4850
4851 /* And now tell the peer we do all the extensions */
4852 pr_supported = (struct sctp_supported_chunk_types_param *)
4853 ((caddr_t)prsctp + sizeof(*prsctp));
4854
4855 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
4856 num_ext = 0;
4857 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF;
4858 pr_supported->chunk_types[num_ext++] = SCTP_ASCONF_ACK;
4859 pr_supported->chunk_types[num_ext++] = SCTP_FORWARD_CUM_TSN;
4860 pr_supported->chunk_types[num_ext++] = SCTP_PACKET_DROPPED;
4861 pr_supported->chunk_types[num_ext++] = SCTP_STREAM_RESET;
4862 if (!sctp_auth_disable)
4863 pr_supported->chunk_types[num_ext++] = SCTP_AUTHENTICATION;
4864 p_len = sizeof(*pr_supported) + num_ext;
4865 pr_supported->ph.param_length = htons(p_len);
4866 bzero((caddr_t)pr_supported + p_len, SCTP_SIZE32(p_len) - p_len);
4867 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4868
4869 /* ECN nonce: And now tell the peer we support ECN nonce */
4870 if (sctp_ecn_nonce) {
4871 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)
4872 ((caddr_t)pr_supported + SCTP_SIZE32(p_len));
4873 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
4874 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
4875 SCTP_BUF_LEN(m) += sizeof(*ecn_nonce);
4876 }
4877 /* add authentication parameters */
4878 if (!sctp_auth_disable) {
4879 struct sctp_auth_random *random;
4880 struct sctp_auth_hmac_algo *hmacs;
4881 struct sctp_auth_chunk_list *chunks;
4882 uint16_t random_len;
4883
4884 /* generate and add RANDOM parameter */
3673 random_len = sctp_auth_random_len;
4885 random_len = SCTP_AUTH_RANDOM_SIZE_DEFAULT;
3674 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
3675 random->ph.param_type = htons(SCTP_RANDOM);
3676 p_len = sizeof(*random) + random_len;
3677 random->ph.param_length = htons(p_len);
3678 SCTP_READ_RANDOM(random->random_data, random_len);
3679 /* zero out any padding required */
3680 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
3681 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
3682
3683 /* add HMAC_ALGO parameter */
3684 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
3685 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
3686 (uint8_t *) hmacs->hmac_ids);
3687 if (p_len > 0) {
3688 p_len += sizeof(*hmacs);
3689 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
3690 hmacs->ph.param_length = htons(p_len);
3691 /* zero out any padding required */
3692 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
3693 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
3694 }
3695 /* add CHUNKS parameter */
3696 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
3697 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
3698 chunks->chunk_types);
3699 if (p_len > 0) {
3700 p_len += sizeof(*chunks);
3701 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
3702 chunks->ph.param_length = htons(p_len);
3703 /* zero out any padding required */
3704 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
3705 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
3706 }
3707 }
3708 m_at = m;
3709 /* now the addresses */
3710 {
3711 struct sctp_scoping scp;
3712
3713 /*
3714 * To optimize this we could put the scoping stuff into a
3715 * structure and remove the individual uint8's from the stc
3716 * structure. Then we could just pass in the address within
3717 * the stc.. but for now this is a quick hack to get the
3718 * address stuff teased apart.
3719 */
3720 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
3721 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
3722 scp.loopback_scope = stc.loopback_scope;
3723 scp.ipv4_local_scope = stc.ipv4_scope;
3724 scp.local_scope = stc.local_scope;
3725 scp.site_scope = stc.site_scope;
3726 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
3727 }
3728
3729 /* tack on the operational error if present */
3730 if (op_err) {
3731 struct mbuf *ol;
3732 int llen;
3733
3734 llen = 0;
3735 ol = op_err;
3736 while (ol) {
3737 llen += SCTP_BUF_LEN(ol);
3738 ol = SCTP_BUF_NEXT(ol);
3739 }
3740 if (llen % 4) {
3741 /* must add a pad to the param */
3742 uint32_t cpthis = 0;
3743 int padlen;
3744
3745 padlen = 4 - (llen % 4);
3746 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
3747 }
3748 while (SCTP_BUF_NEXT(m_at) != NULL) {
3749 m_at = SCTP_BUF_NEXT(m_at);
3750 }
3751 SCTP_BUF_NEXT(m_at) = op_err;
3752 while (SCTP_BUF_NEXT(m_at) != NULL) {
3753 m_at = SCTP_BUF_NEXT(m_at);
3754 }
3755 }
3756 /* Get total size of init packet */
3757 sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length));
3758 /* pre-calulate the size and update pkt header and chunk header */
3759 p_len = 0;
3760 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
3761 p_len += SCTP_BUF_LEN(m_tmp);
3762 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
3763 /* m_tmp should now point to last one */
3764 break;
3765 }
3766 }
3767 /*
3768 * Figure now the size of the cookie. We know the size of the
3769 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
3770 * COOKIE-STRUCTURE and SIGNATURE.
3771 */
3772
3773 /*
3774 * take our earlier INIT calc and add in the sz we just calculated
3775 * minus the size of the sctphdr (its not included in chunk size
3776 */
3777
3778 /* add once for the INIT-ACK */
3779 sz_of += (p_len - sizeof(struct sctphdr));
3780
3781 /* add a second time for the INIT-ACK in the cookie */
3782 sz_of += (p_len - sizeof(struct sctphdr));
3783
3784 /* Now add the cookie header and cookie message struct */
3785 sz_of += sizeof(struct sctp_state_cookie_param);
3786 /* ...and add the size of our signature */
3787 sz_of += SCTP_SIGNATURE_SIZE;
3788 initackm_out->msg.ch.chunk_length = htons(sz_of);
3789
3790 /* Now we must build a cookie */
3791 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
3792 sizeof(struct sctphdr), &stc);
3793 if (m_cookie == NULL) {
3794 /* memory problem */
3795 sctp_m_freem(m);
3796 return;
3797 }
3798 /* Now append the cookie to the end and update the space/size */
3799 SCTP_BUF_NEXT(m_tmp) = m_cookie;
3800 for (; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
3801 p_len += SCTP_BUF_LEN(m_tmp);
3802 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
3803 /* m_tmp should now point to last one */
3804 m_last = m_tmp;
3805 break;
3806 }
3807 }
3808
3809 /*
3810 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return
3811 * here since the timer will drive a retranmission.
3812 */
3813 padval = p_len % 4;
3814 if ((padval) && (m_last)) {
3815 /* see my previous comments on m_last */
3816 int ret;
3817
3818 ret = sctp_add_pad_tombuf(m_last, (4 - padval));
3819 if (ret) {
3820 /* Houston we have a problem, no space */
3821 sctp_m_freem(m);
3822 return;
3823 }
3824 p_len += padval;
3825 }
3826 sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
3827 NULL, 0);
3828 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
3829}
3830
3831
/*
 * Insert stream queue 'strq' onto the association's output "wheel"
 * (the round-robin list of streams with data pending), keeping the
 * wheel sorted by ascending stream number.  A no-op if the entry is
 * already linked.  If 'holds_lock' is zero the TCB send lock is
 * acquired for the duration; otherwise the caller must already hold it.
 */
void
sctp_insert_on_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq, int holds_lock)
{
	struct sctp_stream_out *stre, *strn;

	if (holds_lock == 0)
		SCTP_TCB_SEND_LOCK(stcb);
	/*
	 * Non-NULL tqe links mean we are already on the wheel; the links
	 * are NULLed on removal (see sctp_remove_from_wheel).
	 */
	if ((strq->next_spoke.tqe_next) ||
	    (strq->next_spoke.tqe_prev)) {
		/* already on wheel */
		goto outof_here;
	}
	stre = TAILQ_FIRST(&asoc->out_wheel);
	if (stre == NULL) {
		/* only one on wheel */
		TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
		goto outof_here;
	}
	/* Walk the wheel and insert in stream-number order. */
	for (; stre; stre = strn) {
		strn = TAILQ_NEXT(stre, next_spoke);
		if (stre->stream_no > strq->stream_no) {
			/* First entry with a larger number: insert before it. */
			TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
			goto outof_here;
		} else if (stre->stream_no == strq->stream_no) {
			/* huh, should not happen */
			goto outof_here;
		} else if (strn == NULL) {
			/*
			 * Reached the tail without finding a larger stream
			 * number: append after the last entry.  The loop then
			 * terminates since strn is NULL.
			 */
			TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
			    next_spoke);
		}
	}
outof_here:
	if (holds_lock == 0)
		SCTP_TCB_SEND_UNLOCK(stcb);


}
3872
/*
 * Remove stream queue 'strq' from the association's output wheel and
 * NULL its tqe links so sctp_insert_on_wheel() can later detect that it
 * is off the wheel.  Takes the TCB send lock itself; bails out without
 * removing if new data was queued on the stream in the meantime.
 */
static void
sctp_remove_from_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq)
{
	/* take off and then setup so we know it is not on the wheel */
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_FIRST(&strq->outqueue)) {
		/* more was added */
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
	/* NULL links are the "not on wheel" marker. */
	strq->next_spoke.tqe_next = NULL;
	strq->next_spoke.tqe_prev = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
}
3890
/*
 * Try to free at least 'dataout' bytes of buffer space for a new send
 * by dropping older PR-SCTP (partially reliable) chunks.  First scans
 * the sent queue for buffer-space-limited PR chunks whose priority
 * (stored in rec.data.timetodrop.tv_sec) is no better than the new
 * message's sinfo_timetolive; then scans the send queue for TTL-limited
 * PR chunks under the same test.  Returns as soon as enough space has
 * been reclaimed.  Caller must hold the TCB lock.
 */
static void
sctp_prune_prsctp(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    int dataout)
{
	int freed_spc = 0;	/* bytes reclaimed so far */
	struct sctp_tmit_chunk *chk, *nchk;

	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Only prune when the peer does PR-SCTP and something is droppable. */
	if ((asoc->peer_supports_prsctp) &&
	    (asoc->sent_queue_cnt_removeable > 0)) {
		TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
			/*
			 * Look for chunks marked with the PR_SCTP flag AND
			 * the buffer space flag. If the one being sent is
			 * equal or greater priority then purge the old one
			 * and free some space.
			 *
			 * NOTE(review): sctp_release_pr_sctp_chunk() is
			 * handed &asoc->sent_queue while TAILQ_FOREACH is
			 * still walking it; if it unlinks 'chk' the
			 * iteration continues from a removed element —
			 * verify this is safe (cf. the safe next-pointer
			 * pattern used on the send queue below).
			 */
			if (PR_SCTP_BUF_ENABLED(chk->flags)) {
				/*
				 * This one is PR-SCTP AND buffer space
				 * limited type
				 */
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					/*
					 * Lower numbers equates to higher
					 * priority so if the one we are
					 * looking at has a larger or equal
					 * priority we want to drop the data
					 * and NOT retransmit it.
					 */
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;
						int cause;

						/* Notification differs for data already put on the wire. */
						if (chk->sent > SCTP_DATAGRAM_UNSENT)
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
						else
							cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    cause,
						    &asoc->sent_queue);
						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					}	/* if chunk was present */
				}	/* if of sufficent priority */
			}	/* if chunk has enabled */
		}		/* tailqforeach */

		/*
		 * Not enough reclaimed from the sent queue; now prune
		 * TTL-limited PR chunks from the (not yet sent) send queue.
		 * The next pointer is saved up front since the release may
		 * unlink 'chk'.
		 */
		chk = TAILQ_FIRST(&asoc->send_queue);
		while (chk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			/* Here we must move to the sent queue and mark */
			if (PR_SCTP_TTL_ENABLED(chk->flags)) {
				if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
					if (chk->data) {
						/*
						 * We release the book_size
						 * if the mbuf is here
						 */
						int ret_spc;

						ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
						    SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
						    &asoc->send_queue);

						freed_spc += ret_spc;
						if (freed_spc >= dataout) {
							return;
						}
					}	/* end if chk->data */
				}	/* end if right class */
			}	/* end if chk pr-sctp */
			chk = nchk;
		}		/* end while (chk) */
	}			/* if enabled in asoc */
}
3975
3976__inline int
3977sctp_get_frag_point(struct sctp_tcb *stcb,
3978 struct sctp_association *asoc)
3979{
3980 int siz, ovh;
3981
3982 /*
3983 * For endpoints that have both v6 and v4 addresses we must reserve
3984 * room for the ipv6 header, for those that are only dealing with V4
3985 * we use a larger frag point.
3986 */
3987 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3988 ovh = SCTP_MED_OVERHEAD;
3989 } else {
3990 ovh = SCTP_MED_V4_OVERHEAD;
3991 }
3992
3993 if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
3994 siz = asoc->smallest_mtu - ovh;
3995 else
3996 siz = (stcb->sctp_ep->sctp_frag_point - ovh);
3997 /*
3998 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
3999 */
4000 /* A data chunk MUST fit in a cluster */
4001 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
4002 /* } */
4003
4004 /* adjust for an AUTH chunk if DATA requires auth */
4005 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
4006 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
4007
4008 if (siz % 4) {
4009 /* make it an even word boundary please */
4010 siz -= (siz % 4);
4011 }
4012 return (siz);
4013}
4886 random = (struct sctp_auth_random *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4887 random->ph.param_type = htons(SCTP_RANDOM);
4888 p_len = sizeof(*random) + random_len;
4889 random->ph.param_length = htons(p_len);
4890 SCTP_READ_RANDOM(random->random_data, random_len);
4891 /* zero out any padding required */
4892 bzero((caddr_t)random + p_len, SCTP_SIZE32(p_len) - p_len);
4893 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4894
4895 /* add HMAC_ALGO parameter */
4896 hmacs = (struct sctp_auth_hmac_algo *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4897 p_len = sctp_serialize_hmaclist(inp->sctp_ep.local_hmacs,
4898 (uint8_t *) hmacs->hmac_ids);
4899 if (p_len > 0) {
4900 p_len += sizeof(*hmacs);
4901 hmacs->ph.param_type = htons(SCTP_HMAC_LIST);
4902 hmacs->ph.param_length = htons(p_len);
4903 /* zero out any padding required */
4904 bzero((caddr_t)hmacs + p_len, SCTP_SIZE32(p_len) - p_len);
4905 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4906 }
4907 /* add CHUNKS parameter */
4908 chunks = (struct sctp_auth_chunk_list *)(mtod(m, caddr_t)+SCTP_BUF_LEN(m));
4909 p_len = sctp_serialize_auth_chunks(inp->sctp_ep.local_auth_chunks,
4910 chunks->chunk_types);
4911 if (p_len > 0) {
4912 p_len += sizeof(*chunks);
4913 chunks->ph.param_type = htons(SCTP_CHUNK_LIST);
4914 chunks->ph.param_length = htons(p_len);
4915 /* zero out any padding required */
4916 bzero((caddr_t)chunks + p_len, SCTP_SIZE32(p_len) - p_len);
4917 SCTP_BUF_LEN(m) += SCTP_SIZE32(p_len);
4918 }
4919 }
4920 m_at = m;
4921 /* now the addresses */
4922 {
4923 struct sctp_scoping scp;
4924
4925 /*
4926 * To optimize this we could put the scoping stuff into a
4927 * structure and remove the individual uint8's from the stc
4928 * structure. Then we could just pass in the address within
4929 * the stc.. but for now this is a quick hack to get the
4930 * address stuff teased apart.
4931 */
4932 scp.ipv4_addr_legal = stc.ipv4_addr_legal;
4933 scp.ipv6_addr_legal = stc.ipv6_addr_legal;
4934 scp.loopback_scope = stc.loopback_scope;
4935 scp.ipv4_local_scope = stc.ipv4_scope;
4936 scp.local_scope = stc.local_scope;
4937 scp.site_scope = stc.site_scope;
4938 m_at = sctp_add_addresses_to_i_ia(inp, &scp, m_at, cnt_inits_to);
4939 }
4940
4941 /* tack on the operational error if present */
4942 if (op_err) {
4943 struct mbuf *ol;
4944 int llen;
4945
4946 llen = 0;
4947 ol = op_err;
4948 while (ol) {
4949 llen += SCTP_BUF_LEN(ol);
4950 ol = SCTP_BUF_NEXT(ol);
4951 }
4952 if (llen % 4) {
4953 /* must add a pad to the param */
4954 uint32_t cpthis = 0;
4955 int padlen;
4956
4957 padlen = 4 - (llen % 4);
4958 m_copyback(op_err, llen, padlen, (caddr_t)&cpthis);
4959 }
4960 while (SCTP_BUF_NEXT(m_at) != NULL) {
4961 m_at = SCTP_BUF_NEXT(m_at);
4962 }
4963 SCTP_BUF_NEXT(m_at) = op_err;
4964 while (SCTP_BUF_NEXT(m_at) != NULL) {
4965 m_at = SCTP_BUF_NEXT(m_at);
4966 }
4967 }
4968 /* Get total size of init packet */
4969 sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length));
4970 /* pre-calulate the size and update pkt header and chunk header */
4971 p_len = 0;
4972 for (m_tmp = m; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
4973 p_len += SCTP_BUF_LEN(m_tmp);
4974 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
4975 /* m_tmp should now point to last one */
4976 break;
4977 }
4978 }
4979 /*
4980 * Figure now the size of the cookie. We know the size of the
4981 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
4982 * COOKIE-STRUCTURE and SIGNATURE.
4983 */
4984
4985 /*
4986 * take our earlier INIT calc and add in the sz we just calculated
4987 * minus the size of the sctphdr (its not included in chunk size
4988 */
4989
4990 /* add once for the INIT-ACK */
4991 sz_of += (p_len - sizeof(struct sctphdr));
4992
4993 /* add a second time for the INIT-ACK in the cookie */
4994 sz_of += (p_len - sizeof(struct sctphdr));
4995
4996 /* Now add the cookie header and cookie message struct */
4997 sz_of += sizeof(struct sctp_state_cookie_param);
4998 /* ...and add the size of our signature */
4999 sz_of += SCTP_SIGNATURE_SIZE;
5000 initackm_out->msg.ch.chunk_length = htons(sz_of);
5001
5002 /* Now we must build a cookie */
5003 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
5004 sizeof(struct sctphdr), &stc);
5005 if (m_cookie == NULL) {
5006 /* memory problem */
5007 sctp_m_freem(m);
5008 return;
5009 }
5010 /* Now append the cookie to the end and update the space/size */
5011 SCTP_BUF_NEXT(m_tmp) = m_cookie;
5012 for (; m_tmp; m_tmp = SCTP_BUF_NEXT(m_tmp)) {
5013 p_len += SCTP_BUF_LEN(m_tmp);
5014 if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5015 /* m_tmp should now point to last one */
5016 m_last = m_tmp;
5017 break;
5018 }
5019 }
5020
5021 /*
5022 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the return
5023 * here since the timer will drive a retranmission.
5024 */
5025 padval = p_len % 4;
5026 if ((padval) && (m_last)) {
5027 /* see my previous comments on m_last */
5028 int ret;
5029
5030 ret = sctp_add_pad_tombuf(m_last, (4 - padval));
5031 if (ret) {
5032 /* Houston we have a problem, no space */
5033 sctp_m_freem(m);
5034 return;
5035 }
5036 p_len += padval;
5037 }
5038 sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, NULL, 0, 0,
5039 NULL, 0);
5040 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5041}
5042
5043
/*
 * Insert stream queue 'strq' onto the association's output "wheel"
 * (the round-robin list of streams with data pending), keeping the
 * wheel sorted by ascending stream number.  A no-op if the entry is
 * already linked.  If 'holds_lock' is zero the TCB send lock is
 * acquired for the duration; otherwise the caller must already hold it.
 */
void
sctp_insert_on_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq, int holds_lock)
{
	struct sctp_stream_out *stre, *strn;

	if (holds_lock == 0)
		SCTP_TCB_SEND_LOCK(stcb);
	/*
	 * Non-NULL tqe links mean we are already on the wheel; the links
	 * are NULLed on removal (see sctp_remove_from_wheel).
	 */
	if ((strq->next_spoke.tqe_next) ||
	    (strq->next_spoke.tqe_prev)) {
		/* already on wheel */
		goto outof_here;
	}
	stre = TAILQ_FIRST(&asoc->out_wheel);
	if (stre == NULL) {
		/* only one on wheel */
		TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
		goto outof_here;
	}
	/* Walk the wheel and insert in stream-number order. */
	for (; stre; stre = strn) {
		strn = TAILQ_NEXT(stre, next_spoke);
		if (stre->stream_no > strq->stream_no) {
			/* First entry with a larger number: insert before it. */
			TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
			goto outof_here;
		} else if (stre->stream_no == strq->stream_no) {
			/* huh, should not happen */
			goto outof_here;
		} else if (strn == NULL) {
			/*
			 * Reached the tail without finding a larger stream
			 * number: append after the last entry.  The loop then
			 * terminates since strn is NULL.
			 */
			TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
			    next_spoke);
		}
	}
outof_here:
	if (holds_lock == 0)
		SCTP_TCB_SEND_UNLOCK(stcb);


}
5084
/*
 * Unlink stream 'strq' from the output wheel and clear its queue
 * linkage so a later sctp_insert_on_wheel() sees it as off-wheel.
 * Bails out (leaving the stream on the wheel) if new data was queued
 * on the stream between the caller's emptiness check and taking the
 * send lock here.
 */
static void
sctp_remove_from_wheel(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_out *strq)
{
	/* take off and then setup so we know it is not on the wheel */
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_FIRST(&strq->outqueue)) {
		/* more was added */
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
	/* Both pointers NULL is the "not on wheel" marker used elsewhere. */
	strq->next_spoke.tqe_next = NULL;
	strq->next_spoke.tqe_prev = NULL;
	SCTP_TCB_SEND_UNLOCK(stcb);
}
5102
5103static void
5104sctp_prune_prsctp(struct sctp_tcb *stcb,
5105 struct sctp_association *asoc,
5106 struct sctp_sndrcvinfo *srcv,
5107 int dataout)
5108{
5109 int freed_spc = 0;
5110 struct sctp_tmit_chunk *chk, *nchk;
5111
5112 SCTP_TCB_LOCK_ASSERT(stcb);
5113 if ((asoc->peer_supports_prsctp) &&
5114 (asoc->sent_queue_cnt_removeable > 0)) {
5115 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5116 /*
5117 * Look for chunks marked with the PR_SCTP flag AND
5118 * the buffer space flag. If the one being sent is
5119 * equal or greater priority then purge the old one
5120 * and free some space.
5121 */
5122 if (PR_SCTP_BUF_ENABLED(chk->flags)) {
5123 /*
5124 * This one is PR-SCTP AND buffer space
5125 * limited type
5126 */
5127 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5128 /*
5129 * Lower numbers equates to higher
5130 * priority so if the one we are
5131 * looking at has a larger or equal
5132 * priority we want to drop the data
5133 * and NOT retransmit it.
5134 */
5135 if (chk->data) {
5136 /*
5137 * We release the book_size
5138 * if the mbuf is here
5139 */
5140 int ret_spc;
5141 int cause;
5142
5143 if (chk->sent > SCTP_DATAGRAM_UNSENT)
5144 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT;
5145 else
5146 cause = SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT;
5147 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5148 cause,
5149 &asoc->sent_queue);
5150 freed_spc += ret_spc;
5151 if (freed_spc >= dataout) {
5152 return;
5153 }
5154 } /* if chunk was present */
5155 } /* if of sufficent priority */
5156 } /* if chunk has enabled */
5157 } /* tailqforeach */
5158
5159 chk = TAILQ_FIRST(&asoc->send_queue);
5160 while (chk) {
5161 nchk = TAILQ_NEXT(chk, sctp_next);
5162 /* Here we must move to the sent queue and mark */
5163 if (PR_SCTP_TTL_ENABLED(chk->flags)) {
5164 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
5165 if (chk->data) {
5166 /*
5167 * We release the book_size
5168 * if the mbuf is here
5169 */
5170 int ret_spc;
5171
5172 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
5173 SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_UNSENT,
5174 &asoc->send_queue);
5175
5176 freed_spc += ret_spc;
5177 if (freed_spc >= dataout) {
5178 return;
5179 }
5180 } /* end if chk->data */
5181 } /* end if right class */
5182 } /* end if chk pr-sctp */
5183 chk = nchk;
5184 } /* end while (chk) */
5185 } /* if enabled in asoc */
5186}
5187
5188__inline int
5189sctp_get_frag_point(struct sctp_tcb *stcb,
5190 struct sctp_association *asoc)
5191{
5192 int siz, ovh;
5193
5194 /*
5195 * For endpoints that have both v6 and v4 addresses we must reserve
5196 * room for the ipv6 header, for those that are only dealing with V4
5197 * we use a larger frag point.
5198 */
5199 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5200 ovh = SCTP_MED_OVERHEAD;
5201 } else {
5202 ovh = SCTP_MED_V4_OVERHEAD;
5203 }
5204
5205 if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
5206 siz = asoc->smallest_mtu - ovh;
5207 else
5208 siz = (stcb->sctp_ep->sctp_frag_point - ovh);
5209 /*
5210 * if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) {
5211 */
5212 /* A data chunk MUST fit in a cluster */
5213 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk)); */
5214 /* } */
5215
5216 /* adjust for an AUTH chunk if DATA requires auth */
5217 if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks))
5218 siz -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
5219
5220 if (siz % 4) {
5221 /* make it an even word boundary please */
5222 siz -= (siz % 4);
5223 }
5224 return (siz);
5225}
4014extern unsigned int sctp_max_chunks_on_queue;
4015
4016static void
4017sctp_set_prsctp_policy(struct sctp_tcb *stcb,
4018 struct sctp_stream_queue_pending *sp)
4019{
4020 sp->pr_sctp_on = 0;
4021 if (stcb->asoc.peer_supports_prsctp) {
4022 /*
4023 * We assume that the user wants PR_SCTP_TTL if the user
4024 * provides a positive lifetime but does not specify any
4025 * PR_SCTP policy. This is a BAD assumption and causes
4026 * problems at least with the U-Vancovers MPI folks. I will
4027 * change this to be no policy means NO PR-SCTP.
4028 */
4029 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
4030 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
4031 sp->pr_sctp_on = 1;
4032 } else {
4033 return;
4034 }
4035 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
4036 case CHUNK_FLAGS_PR_SCTP_BUF:
4037 /*
4038 * Time to live is a priority stored in tv_sec when
4039 * doing the buffer drop thing.
4040 */
4041 sp->ts.tv_sec = sp->timetolive;
4042 sp->ts.tv_usec = 0;
4043 break;
4044 case CHUNK_FLAGS_PR_SCTP_TTL:
4045 {
4046 struct timeval tv;
4047
4048 SCTP_GETTIME_TIMEVAL(&sp->ts);
4049 tv.tv_sec = sp->timetolive / 1000;
4050 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
4051 timevaladd(&sp->ts, &tv);
4052 }
4053 break;
4054 case CHUNK_FLAGS_PR_SCTP_RTX:
4055 /*
4056 * Time to live is a the number or retransmissions
4057 * stored in tv_sec.
4058 */
4059 sp->ts.tv_sec = sp->timetolive;
4060 sp->ts.tv_usec = 0;
4061 break;
4062 default:
4063#ifdef SCTP_DEBUG
4064 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
4065 printf("Unknown PR_SCTP policy %u.\n", PR_SCTP_POLICY(sp->sinfo_flags));
4066 }
4067#endif
4068 break;
4069 }
4070 }
4071}
4072
4073static int
4074sctp_msg_append(struct sctp_tcb *stcb,
4075 struct sctp_nets *net,
4076 struct mbuf *m,
4077 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
4078{
4079 int error = 0, holds_lock;
4080 struct mbuf *at;
4081 struct sctp_stream_queue_pending *sp = NULL;
4082 struct sctp_stream_out *strm;
4083
4084 /*
4085 * Given an mbuf chain, put it into the association send queue and
4086 * place it on the wheel
4087 */
4088 holds_lock = hold_stcb_lock;
4089 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
4090 /* Invalid stream number */
4091 error = EINVAL;
4092 goto out_now;
4093 }
4094 if ((stcb->asoc.stream_locked) &&
4095 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
4096 error = EAGAIN;
4097 goto out_now;
4098 }
4099 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
4100 /* Now can we send this? */
4101 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
4102 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
4103 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
4104 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
4105 /* got data while shutting down */
4106 error = ECONNRESET;
4107 goto out_now;
4108 }
4109 sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending);
4110 if (sp == NULL) {
4111 error = ENOMEM;
4112 goto out_now;
4113 }
4114 SCTP_INCR_STRMOQ_COUNT();
4115 sp->sinfo_flags = srcv->sinfo_flags;
4116 sp->timetolive = srcv->sinfo_timetolive;
4117 sp->ppid = srcv->sinfo_ppid;
4118 sp->context = srcv->sinfo_context;
4119 sp->strseq = 0;
4120 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
4121 sp->net = net;
4122 sp->addr_over = 1;
4123 } else {
4124 sp->net = stcb->asoc.primary_destination;
4125 sp->addr_over = 0;
4126 }
4127 atomic_add_int(&sp->net->ref_count, 1);
4128 SCTP_GETTIME_TIMEVAL(&sp->ts);
4129 sp->stream = srcv->sinfo_stream;
4130 sp->msg_is_complete = 1;
4131 sp->some_taken = 0;
4132 sp->data = m;
4133 sp->tail_mbuf = NULL;
4134 sp->length = 0;
4135 at = m;
4136 sctp_set_prsctp_policy(stcb, sp);
4137 /*
4138 * We could in theory (for sendall) pass the length in, but we would
4139 * still have to hunt through the chain since we need to setup the
4140 * tail_mbuf
4141 */
4142 while (at) {
4143 if (SCTP_BUF_NEXT(at) == NULL)
4144 sp->tail_mbuf = at;
4145 sp->length += SCTP_BUF_LEN(at);
4146 at = SCTP_BUF_NEXT(at);
4147 }
4148 SCTP_TCB_SEND_LOCK(stcb);
4149 sctp_snd_sb_alloc(stcb, sp->length);
4150 stcb->asoc.stream_queue_cnt++;
4151 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
4152 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
4153 sp->strseq = strm->next_sequence_sent;
4154 strm->next_sequence_sent++;
4155 }
4156 if ((strm->next_spoke.tqe_next == NULL) &&
4157 (strm->next_spoke.tqe_prev == NULL)) {
4158 /* Not on wheel, insert */
4159 sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
4160 }
4161 m = NULL;
4162 SCTP_TCB_SEND_UNLOCK(stcb);
4163out_now:
4164 if (m) {
4165 sctp_m_freem(m);
4166 }
4167 return (error);
4168}
4169
4170
/*
 * Append the data of 'clonechain' onto 'outchain', returning the head
 * of the (possibly newly allocated) result chain.  '*endofchain' tracks
 * the last mbuf of the result so repeated appends avoid rescanning.
 *
 * If 'can_take_mbuf' is set, clonechain is taken over by reference.
 * Otherwise, when !copy_by_ref and the payload is small (bounded by
 * sctp_mbuf_threshold_count mbufs), the data is flattened into the
 * trailing space of the existing chain; larger payloads are duplicated
 * with SCTP_M_COPYM.  On any failure outchain is freed and NULL is
 * returned.
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		/* Caller relinquishes clonechain; just link it on below. */
		appendchain = clonechain;
	} else {
		if (!copy_by_ref && (sizeofcpy <= ((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
			new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
				} else {
					/*
					 * We really should not get a NULL
					 * in endofchain
					 */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/*
						 * huh, TSNH XXX maybe we
						 * should panic
						 */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end? */
				len = M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				/* Link the fresh mbuf and copy the remainder. */
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			/* No cached tail; walk outchain to find it. */
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain postion
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain postion */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}
4317
4318int
4319sctp_med_chunk_output(struct sctp_inpcb *inp,
4320 struct sctp_tcb *stcb,
4321 struct sctp_association *asoc,
4322 int *num_out,
4323 int *reason_code,
4324 int control_only, int *cwnd_full, int from_where,
4325 struct timeval *now, int *now_filled, int frag_point);
4326
4327static void
4328sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
4329 uint32_t val)
4330{
4331 struct sctp_copy_all *ca;
4332 struct mbuf *m;
4333 int ret = 0;
4334 int added_control = 0;
4335 int un_sent, do_chunk_output = 1;
4336 struct sctp_association *asoc;
4337
4338 ca = (struct sctp_copy_all *)ptr;
4339 if (ca->m == NULL) {
4340 return;
4341 }
4342 if (ca->inp != inp) {
4343 /* TSNH */
4344 return;
4345 }
4346 if ((ca->m) && ca->sndlen) {
4347 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
4348 if (m == NULL) {
4349 /* can't copy so we are done */
4350 ca->cnt_failed++;
4351 return;
4352 }
4353 } else {
4354 m = NULL;
4355 }
4356 SCTP_TCB_LOCK_ASSERT(stcb);
4357 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
4358 /* Abort this assoc with m as the user defined reason */
4359 if (m) {
4360 struct sctp_paramhdr *ph;
4361
4362 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
4363 if (m) {
4364 ph = mtod(m, struct sctp_paramhdr *);
4365 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4366 ph->param_length = htons(ca->sndlen);
4367 }
4368 /*
4369 * We add one here to keep the assoc from
4370 * dis-appearing on us.
4371 */
4372 atomic_add_int(&stcb->asoc.refcnt, 1);
4373 sctp_abort_an_association(inp, stcb,
4374 SCTP_RESPONSE_TO_USER_REQ,
4375 m);
4376 /*
4377 * sctp_abort_an_association calls sctp_free_asoc()
4378 * free association will NOT free it since we
4379 * incremented the refcnt .. we do this to prevent
4380 * it being freed and things getting tricky since we
4381 * could end up (from free_asoc) calling inpcb_free
4382 * which would get a recursive lock call to the
4383 * iterator lock.. But as a consequence of that the
4384 * stcb will return to us un-locked.. since
4385 * free_asoc returns with either no TCB or the TCB
4386 * unlocked, we must relock.. to unlock in the
4387 * iterator timer :-0
4388 */
4389 SCTP_TCB_LOCK(stcb);
4390 atomic_add_int(&stcb->asoc.refcnt, -1);
4391 goto no_chunk_output;
4392 }
4393 } else {
4394 if (m) {
4395 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
4396 &ca->sndrcv, 1);
4397 }
4398 asoc = &stcb->asoc;
4399 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
4400 /* shutdown this assoc */
4401 if (TAILQ_EMPTY(&asoc->send_queue) &&
4402 TAILQ_EMPTY(&asoc->sent_queue) &&
4403 (asoc->stream_queue_cnt == 0)) {
4404 if (asoc->locked_on_sending) {
4405 goto abort_anyway;
4406 }
4407 /*
4408 * there is nothing queued to send, so I'm
4409 * done...
4410 */
4411 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
4412 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
4413 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4414 /*
4415 * only send SHUTDOWN the first time
4416 * through
4417 */
4418 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
4419 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
4420 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4421 }
4422 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4423 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
4424 asoc->primary_destination);
4425 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
4426 asoc->primary_destination);
4427 added_control = 1;
4428 do_chunk_output = 0;
4429 }
4430 } else {
4431 /*
4432 * we still got (or just got) data to send,
4433 * so set SHUTDOWN_PENDING
4434 */
4435 /*
4436 * XXX sockets draft says that SCTP_EOF
4437 * should be sent with no data. currently,
4438 * we will allow user data to be sent first
4439 * and move to SHUTDOWN-PENDING
4440 */
4441 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
4442 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
4443 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4444 if (asoc->locked_on_sending) {
4445 /*
4446 * Locked to send out the
4447 * data
4448 */
4449 struct sctp_stream_queue_pending *sp;
4450
4451 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
4452 if (sp) {
4453 if ((sp->length == 0) && (sp->msg_is_complete == 0))
4454 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4455 }
4456 }
4457 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
4458 if (TAILQ_EMPTY(&asoc->send_queue) &&
4459 TAILQ_EMPTY(&asoc->sent_queue) &&
4460 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4461 abort_anyway:
4462 atomic_add_int(&stcb->asoc.refcnt, 1);
4463 sctp_abort_an_association(stcb->sctp_ep, stcb,
4464 SCTP_RESPONSE_TO_USER_REQ,
4465 NULL);
4466 atomic_add_int(&stcb->asoc.refcnt, -1);
4467 goto no_chunk_output;
4468 }
4469 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
4470 asoc->primary_destination);
4471 }
4472 }
4473
4474 }
4475 }
4476 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
4477 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
4478
4479 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
4480 (stcb->asoc.total_flight > 0) &&
4481 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
4482 ) {
4483 do_chunk_output = 0;
4484 }
4485 if (do_chunk_output)
4486 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
4487 else if (added_control) {
4488 int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0;
4489 struct timeval now;
4490 int frag_point;
4491
4492 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
4493 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
4494 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
4495 }
4496no_chunk_output:
4497 if (ret) {
4498 ca->cnt_failed++;
4499 } else {
4500 ca->cnt_sent++;
4501 }
4502}
4503
/*
 * Completion callback for the SCTP_SENDALL iterator: release the master
 * copy of the user's message and the copy-all context itself.
 */
static void
sctp_sendall_completes(void *ptr, uint32_t val)
{
	struct sctp_copy_all *ca;

	ca = (struct sctp_copy_all *)ptr;
	/*
	 * Do a notify here? Kacheong suggests that the notify be done at
	 * the send time.. so you would push up a notification if any send
	 * failed. Don't know if this is feasable since the only failures we
	 * have is "memory" related and if you cannot get an mbuf to send
	 * the data you surely can't get an mbuf to send up to notify the
	 * user you can't send the data :->
	 */

	/* now free everything */
	sctp_m_freem(ca->m);
	SCTP_FREE(ca);
}
4523
4524
/*
 * Reserve leading space in a cluster mbuf so that 'len' bytes end up
 * long-aligned at the cluster's tail.  (The previous definition was
 * missing a closing parenthesis and would not compile if expanded.)
 */
#define MC_ALIGN(m, len) do { \
	SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
} while (0)
4528
4529
4530
4531static struct mbuf *
4532sctp_copy_out_all(struct uio *uio, int len)
4533{
4534 struct mbuf *ret, *at;
4535 int left, willcpy, cancpy, error;
4536
4537 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
4538 if (ret == NULL) {
4539 /* TSNH */
4540 return (NULL);
4541 }
4542 left = len;
4543 SCTP_BUF_LEN(ret) = 0;
4544 /* save space for the data chunk header */
4545 cancpy = M_TRAILINGSPACE(ret);
4546 willcpy = min(cancpy, left);
4547 at = ret;
4548 while (left > 0) {
4549 /* Align data to the end */
4550 error = uiomove(mtod(at, caddr_t), willcpy, uio);
4551 if (error) {
4552 err_out_now:
4553 sctp_m_freem(at);
4554 return (NULL);
4555 }
4556 SCTP_BUF_LEN(at) = willcpy;
4557 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
4558 left -= willcpy;
4559 if (left > 0) {
4560 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
4561 if (SCTP_BUF_NEXT(at) == NULL) {
4562 goto err_out_now;
4563 }
4564 at = SCTP_BUF_NEXT(at);
4565 SCTP_BUF_LEN(at) = 0;
4566 cancpy = M_TRAILINGSPACE(at);
4567 willcpy = min(cancpy, left);
4568 }
4569 }
4570 return (ret);
4571}
4572
4573static int
4574sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
4575 struct sctp_sndrcvinfo *srcv)
4576{
4577 int ret;
4578 struct sctp_copy_all *ca;
4579
4580 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
4581 "CopyAll");
4582 if (ca == NULL) {
4583 sctp_m_freem(m);
4584 return (ENOMEM);
4585 }
4586 memset(ca, 0, sizeof(struct sctp_copy_all));
4587
4588 ca->inp = inp;
4589 ca->sndrcv = *srcv;
4590 /*
4591 * take off the sendall flag, it would be bad if we failed to do
4592 * this :-0
4593 */
4594 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
4595 /* get length and mbuf chain */
4596 if (uio) {
4597 ca->sndlen = uio->uio_resid;
4598 ca->m = sctp_copy_out_all(uio, ca->sndlen);
4599 if (ca->m == NULL) {
4600 SCTP_FREE(ca);
4601 return (ENOMEM);
4602 }
4603 } else {
4604 /* Gather the length of the send */
4605 struct mbuf *mat;
4606
4607 mat = m;
4608 ca->sndlen = 0;
4609 while (m) {
4610 ca->sndlen += SCTP_BUF_LEN(m);
4611 m = SCTP_BUF_NEXT(m);
4612 }
4613 ca->m = m;
4614 }
5226
5227static void
5228sctp_set_prsctp_policy(struct sctp_tcb *stcb,
5229 struct sctp_stream_queue_pending *sp)
5230{
5231 sp->pr_sctp_on = 0;
5232 if (stcb->asoc.peer_supports_prsctp) {
5233 /*
5234 * We assume that the user wants PR_SCTP_TTL if the user
5235 * provides a positive lifetime but does not specify any
5236 * PR_SCTP policy. This is a BAD assumption and causes
5237 * problems at least with the U-Vancovers MPI folks. I will
5238 * change this to be no policy means NO PR-SCTP.
5239 */
5240 if (PR_SCTP_ENABLED(sp->sinfo_flags)) {
5241 sp->act_flags |= PR_SCTP_POLICY(sp->sinfo_flags);
5242 sp->pr_sctp_on = 1;
5243 } else {
5244 return;
5245 }
5246 switch (PR_SCTP_POLICY(sp->sinfo_flags)) {
5247 case CHUNK_FLAGS_PR_SCTP_BUF:
5248 /*
5249 * Time to live is a priority stored in tv_sec when
5250 * doing the buffer drop thing.
5251 */
5252 sp->ts.tv_sec = sp->timetolive;
5253 sp->ts.tv_usec = 0;
5254 break;
5255 case CHUNK_FLAGS_PR_SCTP_TTL:
5256 {
5257 struct timeval tv;
5258
5259 SCTP_GETTIME_TIMEVAL(&sp->ts);
5260 tv.tv_sec = sp->timetolive / 1000;
5261 tv.tv_usec = (sp->timetolive * 1000) % 1000000;
5262 timevaladd(&sp->ts, &tv);
5263 }
5264 break;
5265 case CHUNK_FLAGS_PR_SCTP_RTX:
5266 /*
5267 * Time to live is a the number or retransmissions
5268 * stored in tv_sec.
5269 */
5270 sp->ts.tv_sec = sp->timetolive;
5271 sp->ts.tv_usec = 0;
5272 break;
5273 default:
5274#ifdef SCTP_DEBUG
5275 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
5276 printf("Unknown PR_SCTP policy %u.\n", PR_SCTP_POLICY(sp->sinfo_flags));
5277 }
5278#endif
5279 break;
5280 }
5281 }
5282}
5283
5284static int
5285sctp_msg_append(struct sctp_tcb *stcb,
5286 struct sctp_nets *net,
5287 struct mbuf *m,
5288 struct sctp_sndrcvinfo *srcv, int hold_stcb_lock)
5289{
5290 int error = 0, holds_lock;
5291 struct mbuf *at;
5292 struct sctp_stream_queue_pending *sp = NULL;
5293 struct sctp_stream_out *strm;
5294
5295 /*
5296 * Given an mbuf chain, put it into the association send queue and
5297 * place it on the wheel
5298 */
5299 holds_lock = hold_stcb_lock;
5300 if (srcv->sinfo_stream >= stcb->asoc.streamoutcnt) {
5301 /* Invalid stream number */
5302 error = EINVAL;
5303 goto out_now;
5304 }
5305 if ((stcb->asoc.stream_locked) &&
5306 (stcb->asoc.stream_locked_on != srcv->sinfo_stream)) {
5307 error = EAGAIN;
5308 goto out_now;
5309 }
5310 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
5311 /* Now can we send this? */
5312 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
5313 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
5314 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
5315 (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)) {
5316 /* got data while shutting down */
5317 error = ECONNRESET;
5318 goto out_now;
5319 }
5320 sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending);
5321 if (sp == NULL) {
5322 error = ENOMEM;
5323 goto out_now;
5324 }
5325 SCTP_INCR_STRMOQ_COUNT();
5326 sp->sinfo_flags = srcv->sinfo_flags;
5327 sp->timetolive = srcv->sinfo_timetolive;
5328 sp->ppid = srcv->sinfo_ppid;
5329 sp->context = srcv->sinfo_context;
5330 sp->strseq = 0;
5331 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
5332 sp->net = net;
5333 sp->addr_over = 1;
5334 } else {
5335 sp->net = stcb->asoc.primary_destination;
5336 sp->addr_over = 0;
5337 }
5338 atomic_add_int(&sp->net->ref_count, 1);
5339 SCTP_GETTIME_TIMEVAL(&sp->ts);
5340 sp->stream = srcv->sinfo_stream;
5341 sp->msg_is_complete = 1;
5342 sp->some_taken = 0;
5343 sp->data = m;
5344 sp->tail_mbuf = NULL;
5345 sp->length = 0;
5346 at = m;
5347 sctp_set_prsctp_policy(stcb, sp);
5348 /*
5349 * We could in theory (for sendall) pass the length in, but we would
5350 * still have to hunt through the chain since we need to setup the
5351 * tail_mbuf
5352 */
5353 while (at) {
5354 if (SCTP_BUF_NEXT(at) == NULL)
5355 sp->tail_mbuf = at;
5356 sp->length += SCTP_BUF_LEN(at);
5357 at = SCTP_BUF_NEXT(at);
5358 }
5359 SCTP_TCB_SEND_LOCK(stcb);
5360 sctp_snd_sb_alloc(stcb, sp->length);
5361 stcb->asoc.stream_queue_cnt++;
5362 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
5363 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
5364 sp->strseq = strm->next_sequence_sent;
5365 strm->next_sequence_sent++;
5366 }
5367 if ((strm->next_spoke.tqe_next == NULL) &&
5368 (strm->next_spoke.tqe_prev == NULL)) {
5369 /* Not on wheel, insert */
5370 sctp_insert_on_wheel(stcb, &stcb->asoc, strm, 1);
5371 }
5372 m = NULL;
5373 SCTP_TCB_SEND_UNLOCK(stcb);
5374out_now:
5375 if (m) {
5376 sctp_m_freem(m);
5377 }
5378 return (error);
5379}
5380
5381
/*
 * Append the data of 'clonechain' onto 'outchain', returning the head
 * of the (possibly newly allocated) result chain.  '*endofchain' tracks
 * the last mbuf of the result so repeated appends avoid rescanning.
 *
 * If 'can_take_mbuf' is set, clonechain is taken over by reference.
 * Otherwise, when !copy_by_ref and the payload is small (bounded by
 * sctp_mbuf_threshold_count mbufs), the data is flattened into the
 * trailing space of the existing chain; larger payloads are duplicated
 * with SCTP_M_COPYM.  On any failure outchain is freed and NULL is
 * returned.
 */
static struct mbuf *
sctp_copy_mbufchain(struct mbuf *clonechain,
    struct mbuf *outchain,
    struct mbuf **endofchain,
    int can_take_mbuf,
    int sizeofcpy,
    uint8_t copy_by_ref)
{
	struct mbuf *m;
	struct mbuf *appendchain;
	caddr_t cp;
	int len;

	if (endofchain == NULL) {
		/* error */
error_out:
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (can_take_mbuf) {
		/* Caller relinquishes clonechain; just link it on below. */
		appendchain = clonechain;
	} else {
		if (!copy_by_ref && (sizeofcpy <= ((((sctp_mbuf_threshold_count - 1) * MLEN) + MHLEN)))) {
			/* Its not in a cluster */
			if (*endofchain == NULL) {
				/* lets get a mbuf cluster */
				if (outchain == NULL) {
					/* This is the general case */
			new_mbuf:
					outchain = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
					if (outchain == NULL) {
						goto error_out;
					}
					SCTP_BUF_LEN(outchain) = 0;
					*endofchain = outchain;
					/* get the prepend space */
					SCTP_BUF_RESV_UF(outchain, (SCTP_FIRST_MBUF_RESV + 4));
				} else {
					/*
					 * We really should not get a NULL
					 * in endofchain
					 */
					/* find end */
					m = outchain;
					while (m) {
						if (SCTP_BUF_NEXT(m) == NULL) {
							*endofchain = m;
							break;
						}
						m = SCTP_BUF_NEXT(m);
					}
					/* sanity */
					if (*endofchain == NULL) {
						/*
						 * huh, TSNH XXX maybe we
						 * should panic
						 */
						sctp_m_freem(outchain);
						goto new_mbuf;
					}
				}
				/* get the new end of length */
				len = M_TRAILINGSPACE(*endofchain);
			} else {
				/* how much is left at the end? */
				len = M_TRAILINGSPACE(*endofchain);
			}
			/* Find the end of the data, for appending */
			cp = (mtod((*endofchain), caddr_t)+SCTP_BUF_LEN((*endofchain)));

			/* Now lets copy it out */
			if (len >= sizeofcpy) {
				/* It all fits, copy it in */
				m_copydata(clonechain, 0, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			} else {
				/* fill up the end of the chain */
				if (len > 0) {
					m_copydata(clonechain, 0, len, cp);
					SCTP_BUF_LEN((*endofchain)) += len;
					/* now we need another one */
					sizeofcpy -= len;
				}
				m = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_HEADER);
				if (m == NULL) {
					/* We failed */
					goto error_out;
				}
				/* Link the fresh mbuf and copy the remainder. */
				SCTP_BUF_NEXT((*endofchain)) = m;
				*endofchain = m;
				cp = mtod((*endofchain), caddr_t);
				m_copydata(clonechain, len, sizeofcpy, cp);
				SCTP_BUF_LEN((*endofchain)) += sizeofcpy;
			}
			return (outchain);
		} else {
			/* copy the old fashion way */
			appendchain = SCTP_M_COPYM(clonechain, 0, M_COPYALL, M_DONTWAIT);
		}
	}
	if (appendchain == NULL) {
		/* error */
		if (outchain)
			sctp_m_freem(outchain);
		return (NULL);
	}
	if (outchain) {
		/* tack on to the end */
		if (*endofchain != NULL) {
			SCTP_BUF_NEXT(((*endofchain))) = appendchain;
		} else {
			/* No cached tail; walk outchain to find it. */
			m = outchain;
			while (m) {
				if (SCTP_BUF_NEXT(m) == NULL) {
					SCTP_BUF_NEXT(m) = appendchain;
					break;
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		/*
		 * save off the end and update the end-chain postion
		 */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (outchain);
	} else {
		/* save off the end and update the end-chain postion */
		m = appendchain;
		while (m) {
			if (SCTP_BUF_NEXT(m) == NULL) {
				*endofchain = m;
				break;
			}
			m = SCTP_BUF_NEXT(m);
		}
		return (appendchain);
	}
}
5528
5529int
5530sctp_med_chunk_output(struct sctp_inpcb *inp,
5531 struct sctp_tcb *stcb,
5532 struct sctp_association *asoc,
5533 int *num_out,
5534 int *reason_code,
5535 int control_only, int *cwnd_full, int from_where,
5536 struct timeval *now, int *now_filled, int frag_point);
5537
5538static void
5539sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
5540 uint32_t val)
5541{
5542 struct sctp_copy_all *ca;
5543 struct mbuf *m;
5544 int ret = 0;
5545 int added_control = 0;
5546 int un_sent, do_chunk_output = 1;
5547 struct sctp_association *asoc;
5548
5549 ca = (struct sctp_copy_all *)ptr;
5550 if (ca->m == NULL) {
5551 return;
5552 }
5553 if (ca->inp != inp) {
5554 /* TSNH */
5555 return;
5556 }
5557 if ((ca->m) && ca->sndlen) {
5558 m = SCTP_M_COPYM(ca->m, 0, M_COPYALL, M_DONTWAIT);
5559 if (m == NULL) {
5560 /* can't copy so we are done */
5561 ca->cnt_failed++;
5562 return;
5563 }
5564 } else {
5565 m = NULL;
5566 }
5567 SCTP_TCB_LOCK_ASSERT(stcb);
5568 if (ca->sndrcv.sinfo_flags & SCTP_ABORT) {
5569 /* Abort this assoc with m as the user defined reason */
5570 if (m) {
5571 struct sctp_paramhdr *ph;
5572
5573 SCTP_BUF_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
5574 if (m) {
5575 ph = mtod(m, struct sctp_paramhdr *);
5576 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
5577 ph->param_length = htons(ca->sndlen);
5578 }
5579 /*
5580 * We add one here to keep the assoc from
5581 * dis-appearing on us.
5582 */
5583 atomic_add_int(&stcb->asoc.refcnt, 1);
5584 sctp_abort_an_association(inp, stcb,
5585 SCTP_RESPONSE_TO_USER_REQ,
5586 m);
5587 /*
5588 * sctp_abort_an_association calls sctp_free_asoc()
5589 * free association will NOT free it since we
5590 * incremented the refcnt .. we do this to prevent
5591 * it being freed and things getting tricky since we
5592 * could end up (from free_asoc) calling inpcb_free
5593 * which would get a recursive lock call to the
5594 * iterator lock.. But as a consequence of that the
5595 * stcb will return to us un-locked.. since
5596 * free_asoc returns with either no TCB or the TCB
5597 * unlocked, we must relock.. to unlock in the
5598 * iterator timer :-0
5599 */
5600 SCTP_TCB_LOCK(stcb);
5601 atomic_add_int(&stcb->asoc.refcnt, -1);
5602 goto no_chunk_output;
5603 }
5604 } else {
5605 if (m) {
5606 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m,
5607 &ca->sndrcv, 1);
5608 }
5609 asoc = &stcb->asoc;
5610 if (ca->sndrcv.sinfo_flags & SCTP_EOF) {
5611 /* shutdown this assoc */
5612 if (TAILQ_EMPTY(&asoc->send_queue) &&
5613 TAILQ_EMPTY(&asoc->sent_queue) &&
5614 (asoc->stream_queue_cnt == 0)) {
5615 if (asoc->locked_on_sending) {
5616 goto abort_anyway;
5617 }
5618 /*
5619 * there is nothing queued to send, so I'm
5620 * done...
5621 */
5622 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
5623 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
5624 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5625 /*
5626 * only send SHUTDOWN the first time
5627 * through
5628 */
5629 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
5630 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
5631 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5632 }
5633 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
5634 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
5635 asoc->primary_destination);
5636 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
5637 asoc->primary_destination);
5638 added_control = 1;
5639 do_chunk_output = 0;
5640 }
5641 } else {
5642 /*
5643 * we still got (or just got) data to send,
5644 * so set SHUTDOWN_PENDING
5645 */
5646 /*
5647 * XXX sockets draft says that SCTP_EOF
5648 * should be sent with no data. currently,
5649 * we will allow user data to be sent first
5650 * and move to SHUTDOWN-PENDING
5651 */
5652 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
5653 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
5654 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5655 if (asoc->locked_on_sending) {
5656 /*
5657 * Locked to send out the
5658 * data
5659 */
5660 struct sctp_stream_queue_pending *sp;
5661
5662 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
5663 if (sp) {
5664 if ((sp->length == 0) && (sp->msg_is_complete == 0))
5665 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
5666 }
5667 }
5668 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
5669 if (TAILQ_EMPTY(&asoc->send_queue) &&
5670 TAILQ_EMPTY(&asoc->sent_queue) &&
5671 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
5672 abort_anyway:
5673 atomic_add_int(&stcb->asoc.refcnt, 1);
5674 sctp_abort_an_association(stcb->sctp_ep, stcb,
5675 SCTP_RESPONSE_TO_USER_REQ,
5676 NULL);
5677 atomic_add_int(&stcb->asoc.refcnt, -1);
5678 goto no_chunk_output;
5679 }
5680 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
5681 asoc->primary_destination);
5682 }
5683 }
5684
5685 }
5686 }
5687 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
5688 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
5689
5690 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
5691 (stcb->asoc.total_flight > 0) &&
5692 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
5693 ) {
5694 do_chunk_output = 0;
5695 }
5696 if (do_chunk_output)
5697 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
5698 else if (added_control) {
5699 int num_out = 0, reason = 0, cwnd_full = 0, now_filled = 0;
5700 struct timeval now;
5701 int frag_point;
5702
5703 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
5704 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
5705 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
5706 }
5707no_chunk_output:
5708 if (ret) {
5709 ca->cnt_failed++;
5710 } else {
5711 ca->cnt_sent++;
5712 }
5713}
5714
5715static void
5716sctp_sendall_completes(void *ptr, uint32_t val)
5717{
5718 struct sctp_copy_all *ca;
5719
5720 ca = (struct sctp_copy_all *)ptr;
5721 /*
5722 * Do a notify here? Kacheong suggests that the notify be done at
5723 * the send time.. so you would push up a notification if any send
5724 * failed. Don't know if this is feasable since the only failures we
5725 * have is "memory" related and if you cannot get an mbuf to send
5726 * the data you surely can't get an mbuf to send up to notify the
5727 * user you can't send the data :->
5728 */
5729
5730 /* now free everything */
5731 sctp_m_freem(ca->m);
5732 SCTP_FREE(ca);
5733}
5734
5735
/*
 * Align the data region of a cluster mbuf to the end of the buffer on a
 * long-word boundary.  The original expansion was missing the closing
 * parenthesis of the length expression, which would have been a syntax
 * error at any expansion site.
 */
#define MC_ALIGN(m, len) do { \
	SCTP_BUF_RESV_UF(m, ((MCLBYTES - (len)) & ~(sizeof(long) - 1))); \
} while (0)
5739
5740
5741
5742static struct mbuf *
5743sctp_copy_out_all(struct uio *uio, int len)
5744{
5745 struct mbuf *ret, *at;
5746 int left, willcpy, cancpy, error;
5747
5748 ret = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_WAIT, 1, MT_DATA);
5749 if (ret == NULL) {
5750 /* TSNH */
5751 return (NULL);
5752 }
5753 left = len;
5754 SCTP_BUF_LEN(ret) = 0;
5755 /* save space for the data chunk header */
5756 cancpy = M_TRAILINGSPACE(ret);
5757 willcpy = min(cancpy, left);
5758 at = ret;
5759 while (left > 0) {
5760 /* Align data to the end */
5761 error = uiomove(mtod(at, caddr_t), willcpy, uio);
5762 if (error) {
5763 err_out_now:
5764 sctp_m_freem(at);
5765 return (NULL);
5766 }
5767 SCTP_BUF_LEN(at) = willcpy;
5768 SCTP_BUF_NEXT_PKT(at) = SCTP_BUF_NEXT(at) = 0;
5769 left -= willcpy;
5770 if (left > 0) {
5771 SCTP_BUF_NEXT(at) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 1, MT_DATA);
5772 if (SCTP_BUF_NEXT(at) == NULL) {
5773 goto err_out_now;
5774 }
5775 at = SCTP_BUF_NEXT(at);
5776 SCTP_BUF_LEN(at) = 0;
5777 cancpy = M_TRAILINGSPACE(at);
5778 willcpy = min(cancpy, left);
5779 }
5780 }
5781 return (ret);
5782}
5783
5784static int
5785sctp_sendall(struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m,
5786 struct sctp_sndrcvinfo *srcv)
5787{
5788 int ret;
5789 struct sctp_copy_all *ca;
5790
5791 SCTP_MALLOC(ca, struct sctp_copy_all *, sizeof(struct sctp_copy_all),
5792 "CopyAll");
5793 if (ca == NULL) {
5794 sctp_m_freem(m);
5795 return (ENOMEM);
5796 }
5797 memset(ca, 0, sizeof(struct sctp_copy_all));
5798
5799 ca->inp = inp;
5800 ca->sndrcv = *srcv;
5801 /*
5802 * take off the sendall flag, it would be bad if we failed to do
5803 * this :-0
5804 */
5805 ca->sndrcv.sinfo_flags &= ~SCTP_SENDALL;
5806 /* get length and mbuf chain */
5807 if (uio) {
5808 ca->sndlen = uio->uio_resid;
5809 ca->m = sctp_copy_out_all(uio, ca->sndlen);
5810 if (ca->m == NULL) {
5811 SCTP_FREE(ca);
5812 return (ENOMEM);
5813 }
5814 } else {
5815 /* Gather the length of the send */
5816 struct mbuf *mat;
5817
5818 mat = m;
5819 ca->sndlen = 0;
5820 while (m) {
5821 ca->sndlen += SCTP_BUF_LEN(m);
5822 m = SCTP_BUF_NEXT(m);
5823 }
5824 ca->m = m;
5825 }
4615 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator,
5826 ret = sctp_initiate_iterator(NULL, sctp_sendall_iterator, NULL,
4616 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
4617 (void *)ca, 0,
4618 sctp_sendall_completes, inp, 1);
4619 if (ret) {
4620#ifdef SCTP_DEBUG
4621 printf("Failed to initiate iterator for sendall\n");
4622#endif
4623 SCTP_FREE(ca);
4624 return (EFAULT);
4625 }
4626 return (0);
4627}
4628
4629
4630void
4631sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
4632{
4633 struct sctp_tmit_chunk *chk, *nchk;
4634
4635 chk = TAILQ_FIRST(&asoc->control_send_queue);
4636 while (chk) {
4637 nchk = TAILQ_NEXT(chk, sctp_next);
4638 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
4639 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
4640 if (chk->data) {
4641 sctp_m_freem(chk->data);
4642 chk->data = NULL;
4643 }
4644 asoc->ctrl_queue_cnt--;
4645 if (chk->whoTo)
4646 sctp_free_remote_addr(chk->whoTo);
4647 sctp_free_a_chunk(stcb, chk);
4648 }
4649 chk = nchk;
4650 }
4651}
4652
4653void
4654sctp_toss_old_asconf(struct sctp_tcb *stcb)
4655{
4656 struct sctp_association *asoc;
4657 struct sctp_tmit_chunk *chk, *chk_tmp;
4658
4659 asoc = &stcb->asoc;
4660 for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
4661 chk = chk_tmp) {
4662 /* get next chk */
4663 chk_tmp = TAILQ_NEXT(chk, sctp_next);
4664 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
4665 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
4666 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
4667 if (chk->data) {
4668 sctp_m_freem(chk->data);
4669 chk->data = NULL;
4670 }
4671 asoc->ctrl_queue_cnt--;
4672 if (chk->whoTo)
4673 sctp_free_remote_addr(chk->whoTo);
4674 sctp_free_a_chunk(stcb, chk);
4675 }
4676 }
4677}
4678
4679
/*
 * Bookkeeping after a batch of DATA chunks (data_list[0..bundle_at-1])
 * has been handed to the lower layer for transmission on 'net': move
 * each chunk from the send queue to the sent queue (kept sorted by TSN),
 * mark a lone chunk sent against a closed window as a window probe, and
 * charge the chunk sizes to the flight size and the peer's rwnd.
 */
static __inline void
sctp_clean_up_datalist(struct sctp_tcb *stcb,

    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		if (i) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurment is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
		TAILQ_REMOVE(&asoc->send_queue,
		    data_list[i],
		    sctp_next);
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
		    data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
			struct sctp_tmit_chunk *tpp;

			/*
			 * The queue tail has a larger TSN (wrap-aware),
			 * so walk backwards to find the insertion point
			 * that keeps the sent queue in TSN order.
			 */
			/* need to move back */
	back_up_more:
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				/* reached the head: this TSN is smallest */
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (compare_with_wrap(tp1->rec.data.TSN_seq,
			    data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			/* common case: TSNs arrive in order, append */
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		asoc->send_queue_cnt--;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
			data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE;
		} else {
			data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
#ifdef SCTP_FLIGHT_LOGGING
		sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
		    data_list[i]->whoTo->flight_size,
		    data_list[i]->book_size,
		    (uintptr_t) stcb,
		    data_list[i]->rec.data.TSN_seq);
#endif
		/* the chunk is now in flight: charge net and assoc totals */
		net->flight_size += data_list[i]->book_size;
		asoc->total_flight += data_list[i]->book_size;
		asoc->total_flight_count++;
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
		    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
#endif
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
}
4772
/*
 * Sweep the control send queue after an output pass and discard one-shot
 * control chunks that must not linger: SACKs, heartbeats and their acks,
 * the shutdown family, operation errors, packet-drop reports, cookie-acks,
 * CWRs and ASCONF-ACKs.  A STREAM_RESET chunk is discarded only when it is
 * not the association's currently outstanding request (asoc->str_reset).
 */
static __inline void
sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/* successor is captured first so removing chk is safe mid-walk */
	for (chk = TAILQ_FIRST(&asoc->control_send_queue);
	    chk; chk = nchk) {
		nchk = TAILQ_NEXT(chk, sctp_next);
		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
	clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
			/* special handling, we must look into the param */
			if (chk != asoc->str_reset) {
				goto clean_up_anyway;
			}
		}
	}
}
4809
5827 SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
5828 (void *)ca, 0,
5829 sctp_sendall_completes, inp, 1);
5830 if (ret) {
5831#ifdef SCTP_DEBUG
5832 printf("Failed to initiate iterator for sendall\n");
5833#endif
5834 SCTP_FREE(ca);
5835 return (EFAULT);
5836 }
5837 return (0);
5838}
5839
5840
5841void
5842sctp_toss_old_cookies(struct sctp_tcb *stcb, struct sctp_association *asoc)
5843{
5844 struct sctp_tmit_chunk *chk, *nchk;
5845
5846 chk = TAILQ_FIRST(&asoc->control_send_queue);
5847 while (chk) {
5848 nchk = TAILQ_NEXT(chk, sctp_next);
5849 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
5850 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
5851 if (chk->data) {
5852 sctp_m_freem(chk->data);
5853 chk->data = NULL;
5854 }
5855 asoc->ctrl_queue_cnt--;
5856 if (chk->whoTo)
5857 sctp_free_remote_addr(chk->whoTo);
5858 sctp_free_a_chunk(stcb, chk);
5859 }
5860 chk = nchk;
5861 }
5862}
5863
5864void
5865sctp_toss_old_asconf(struct sctp_tcb *stcb)
5866{
5867 struct sctp_association *asoc;
5868 struct sctp_tmit_chunk *chk, *chk_tmp;
5869
5870 asoc = &stcb->asoc;
5871 for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
5872 chk = chk_tmp) {
5873 /* get next chk */
5874 chk_tmp = TAILQ_NEXT(chk, sctp_next);
5875 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
5876 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
5877 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
5878 if (chk->data) {
5879 sctp_m_freem(chk->data);
5880 chk->data = NULL;
5881 }
5882 asoc->ctrl_queue_cnt--;
5883 if (chk->whoTo)
5884 sctp_free_remote_addr(chk->whoTo);
5885 sctp_free_a_chunk(stcb, chk);
5886 }
5887 }
5888}
5889
5890
/*
 * Bookkeeping after a batch of DATA chunks (data_list[0..bundle_at-1])
 * has been handed to the lower layer for transmission on 'net': move
 * each chunk from the send queue to the sent queue (kept sorted by TSN),
 * mark a lone chunk sent against a closed window as a window probe, and
 * charge the chunk sizes to the flight size and the peer's rwnd.
 */
static __inline void
sctp_clean_up_datalist(struct sctp_tcb *stcb,

    struct sctp_association *asoc,
    struct sctp_tmit_chunk **data_list,
    int bundle_at,
    struct sctp_nets *net)
{
	int i;
	struct sctp_tmit_chunk *tp1;

	for (i = 0; i < bundle_at; i++) {
		/* off of the send queue */
		if (i) {
			/*
			 * Any chunk NOT 0 you zap the time chunk 0 gets
			 * zapped or set based on if a RTO measurment is
			 * needed.
			 */
			data_list[i]->do_rtt = 0;
		}
		/* record time */
		data_list[i]->sent_rcv_time = net->last_sent_time;
		data_list[i]->rec.data.fast_retran_tsn = data_list[i]->rec.data.TSN_seq;
		TAILQ_REMOVE(&asoc->send_queue,
		    data_list[i],
		    sctp_next);
		/* on to the sent queue */
		tp1 = TAILQ_LAST(&asoc->sent_queue, sctpchunk_listhead);
		if ((tp1) && (compare_with_wrap(tp1->rec.data.TSN_seq,
		    data_list[i]->rec.data.TSN_seq, MAX_TSN))) {
			struct sctp_tmit_chunk *tpp;

			/*
			 * The queue tail has a larger TSN (wrap-aware),
			 * so walk backwards to find the insertion point
			 * that keeps the sent queue in TSN order.
			 */
			/* need to move back */
	back_up_more:
			tpp = TAILQ_PREV(tp1, sctpchunk_listhead, sctp_next);
			if (tpp == NULL) {
				/* reached the head: this TSN is smallest */
				TAILQ_INSERT_BEFORE(tp1, data_list[i], sctp_next);
				goto all_done;
			}
			tp1 = tpp;
			if (compare_with_wrap(tp1->rec.data.TSN_seq,
			    data_list[i]->rec.data.TSN_seq, MAX_TSN)) {
				goto back_up_more;
			}
			TAILQ_INSERT_AFTER(&asoc->sent_queue, tp1, data_list[i], sctp_next);
		} else {
			/* common case: TSNs arrive in order, append */
			TAILQ_INSERT_TAIL(&asoc->sent_queue,
			    data_list[i],
			    sctp_next);
		}
all_done:
		/* This does not lower until the cum-ack passes it */
		asoc->sent_queue_cnt++;
		asoc->send_queue_cnt--;
		if ((asoc->peers_rwnd <= 0) &&
		    (asoc->total_flight == 0) &&
		    (bundle_at == 1)) {
			/* Mark the chunk as being a window probe */
			SCTP_STAT_INCR(sctps_windowprobed);
			data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE;
		} else {
			data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xC2, 3);
#endif
		data_list[i]->sent = SCTP_DATAGRAM_SENT;
		data_list[i]->snd_count = 1;
		data_list[i]->rec.data.chunk_was_revoked = 0;
#ifdef SCTP_FLIGHT_LOGGING
		sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
		    data_list[i]->whoTo->flight_size,
		    data_list[i]->book_size,
		    (uintptr_t) stcb,
		    data_list[i]->rec.data.TSN_seq);
#endif
		/* the chunk is now in flight: charge net and assoc totals */
		net->flight_size += data_list[i]->book_size;
		asoc->total_flight += data_list[i]->book_size;
		asoc->total_flight_count++;
#ifdef SCTP_LOG_RWND
		sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
		    asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
#endif
		asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
		    (uint32_t) (data_list[i]->send_size + sctp_peer_chunk_oh));
		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
			/* SWS sender side engages */
			asoc->peers_rwnd = 0;
		}
	}
}
5983
/*
 * Sweep the control send queue after an output pass and discard one-shot
 * control chunks that must not linger: SACKs, heartbeats and their acks,
 * the shutdown family, operation errors, packet-drop reports, cookie-acks,
 * CWRs and ASCONF-ACKs.  A STREAM_RESET chunk is discarded only when it is
 * not the association's currently outstanding request (asoc->str_reset).
 */
static __inline void
sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk, *nchk;

	/* successor is captured first so removing chk is safe mid-walk */
	for (chk = TAILQ_FIRST(&asoc->control_send_queue);
	    chk; chk = nchk) {
		nchk = TAILQ_NEXT(chk, sctp_next);
		if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
		    (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
		    (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
		    (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
		    (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
		    (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
		    (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
			/* Stray chunks must be cleaned up */
	clean_up_anyway:
			TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			asoc->ctrl_queue_cnt--;
			sctp_free_remote_addr(chk->whoTo);
			sctp_free_a_chunk(stcb, chk);
		} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
			/* special handling, we must look into the param */
			if (chk != asoc->str_reset) {
				goto clean_up_anyway;
			}
		}
	}
}
6020
4810extern int sctp_min_split_point;
4811
4812static __inline int
4813sctp_can_we_split_this(struct sctp_tcb *stcb,
4814 struct sctp_stream_queue_pending *sp,
4815 int goal_mtu, int frag_point, int eeor_on)
4816{
4817 /*
4818 * Make a decision on if I should split a msg into multiple parts.
4819 */
4820 if (goal_mtu < sctp_min_split_point) {
4821 /* you don't want enough */
4822 return (0);
4823 }
4824 if (sp->msg_is_complete == 0) {
4825 if (eeor_on) {
4826 /*
4827 * If we are doing EEOR we need to always send it if
4828 * its the entire thing.
4829 */
4830 if (goal_mtu >= sp->length)
4831 return (sp->length);
4832 } else {
4833 if (goal_mtu >= sp->length) {
4834 /*
4835 * If we cannot fill the amount needed there
4836 * is no sense of splitting the chunk.
4837 */
4838 return (0);
4839 }
4840 }
4841 /*
4842 * If we reach here sp->length is larger than the goal_mtu.
4843 * Do we wish to split it for the sake of packet putting
4844 * together?
4845 */
4846 if (goal_mtu >= min(sctp_min_split_point, stcb->asoc.smallest_mtu)) {
4847 /* Its ok to split it */
4848 return (min(goal_mtu, frag_point));
4849 }
4850 } else {
4851 /* We can always split a complete message to make it fit */
4852 if (goal_mtu >= sp->length)
4853 /* Take it all */
4854 return (sp->length);
4855
4856 return (min(goal_mtu, frag_point));
4857 }
4858 /* Nope, can't split */
4859 return (0);
4860
4861}
4862
4863static int
4864sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
4865 struct sctp_stream_out *strq,
4866 int goal_mtu,
4867 int frag_point,
4868 int *locked,
4869 int *giveup,
4870 int eeor_mode)
4871{
4872 /* Move from the stream to the send_queue keeping track of the total */
4873 struct sctp_association *asoc;
4874 struct sctp_stream_queue_pending *sp;
4875 struct sctp_tmit_chunk *chk;
4876 struct sctp_data_chunk *dchkh;
4877 int to_move;
4878 uint8_t rcv_flags = 0;
4879 uint8_t some_taken;
4880 uint8_t took_all = 0;
4881
4882 SCTP_TCB_LOCK_ASSERT(stcb);
4883 asoc = &stcb->asoc;
4884 sp = TAILQ_FIRST(&strq->outqueue);
4885 if (sp == NULL) {
4886 *locked = 0;
4887 SCTP_TCB_SEND_LOCK(stcb);
4888 if (strq->last_msg_incomplete) {
4889 printf("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
4890 strq->stream_no, strq->last_msg_incomplete);
4891 strq->last_msg_incomplete = 0;
4892 }
4893 SCTP_TCB_SEND_UNLOCK(stcb);
4894 return (0);
4895 }
4896 SCTP_TCB_SEND_LOCK(stcb);
4897 if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
4898 /* Must wait for more data, must be last msg */
4899 *locked = 1;
4900 *giveup = 1;
4901 SCTP_TCB_SEND_UNLOCK(stcb);
4902 return (0);
4903 } else if (sp->length == 0) {
4904 /* This should not happen */
4905 panic("sp length is 0?");
4906 }
4907 some_taken = sp->some_taken;
4908 if ((goal_mtu >= sp->length) && (sp->msg_is_complete)) {
4909 /* It all fits and its a complete msg, no brainer */
4910 to_move = min(sp->length, frag_point);
4911 if (to_move == sp->length) {
4912 /* Getting it all */
4913 if (sp->some_taken) {
4914 rcv_flags |= SCTP_DATA_LAST_FRAG;
4915 } else {
4916 rcv_flags |= SCTP_DATA_NOT_FRAG;
4917 }
4918 } else {
4919 /* Not getting it all, frag point overrides */
4920 if (sp->some_taken == 0) {
4921 rcv_flags |= SCTP_DATA_FIRST_FRAG;
4922 }
4923 sp->some_taken = 1;
4924 }
4925 } else {
4926 to_move = sctp_can_we_split_this(stcb, sp, goal_mtu,
4927 frag_point, eeor_mode);
4928 if (to_move) {
4929 if (to_move >= sp->length) {
4930 to_move = sp->length;
4931 }
4932 if (sp->some_taken == 0) {
4933 rcv_flags |= SCTP_DATA_FIRST_FRAG;
4934 }
4935 sp->some_taken = 1;
4936 } else {
4937 if (sp->some_taken) {
4938 *locked = 1;
4939 }
4940 *giveup = 1;
4941 SCTP_TCB_SEND_UNLOCK(stcb);
4942 return (0);
4943 }
4944 }
4945 SCTP_TCB_SEND_UNLOCK(stcb);
4946 /* If we reach here, we can copy out a chunk */
4947 sctp_alloc_a_chunk(stcb, chk);
4948 if (chk == NULL) {
4949 /* No chunk memory */
4950out_gu:
4951 *giveup = 1;
4952 return (0);
4953 }
4954 /*
4955 * Setup for unordered if needed by looking at the user sent info
4956 * flags.
4957 */
4958 if (sp->sinfo_flags & SCTP_UNORDERED) {
4959 rcv_flags |= SCTP_DATA_UNORDERED;
4960 }
4961 /* clear out the chunk before setting up */
4962 memset(chk, sizeof(*chk), 0);
4963 chk->rec.data.rcv_flags = rcv_flags;
4964 SCTP_TCB_SEND_LOCK(stcb);
4965 if (SCTP_BUF_IS_EXTENDED(sp->data)) {
4966 chk->copy_by_ref = 1;
4967 } else {
4968 chk->copy_by_ref = 0;
4969 }
4970 if (to_move >= sp->length) {
4971 /* we can steal the whole thing */
4972 chk->data = sp->data;
4973 chk->last_mbuf = sp->tail_mbuf;
4974 /* register the stealing */
4975 sp->data = sp->tail_mbuf = NULL;
4976 took_all = 1;
4977 } else {
4978 struct mbuf *m;
4979
4980 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
4981 chk->last_mbuf = NULL;
4982 if (chk->data == NULL) {
4983 sp->some_taken = some_taken;
4984 sctp_free_a_chunk(stcb, chk);
4985 SCTP_TCB_SEND_UNLOCK(stcb);
4986 goto out_gu;
4987 }
4988 /* Pull off the data */
4989 m_adj(sp->data, to_move);
4990 /* Now lets work our way down and compact it */
4991 m = sp->data;
4992 while (m && (SCTP_BUF_LEN(m) == 0)) {
4993 sp->data = SCTP_BUF_NEXT(m);
4994 SCTP_BUF_NEXT(m) = NULL;
4995 if (sp->tail_mbuf == m) {
4996 /* freeing tail */
4997 sp->tail_mbuf = sp->data;
4998 }
4999 sctp_m_free(m);
5000 m = sp->data;
5001 }
5002 }
5003 if (to_move > sp->length) {
5004 panic("Huh, how can to_move be larger?");
5005 } else {
5006 sp->length -= to_move;
5007 }
5008
5009 if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) {
5010 /* Not enough room for a chunk header, get some */
5011 struct mbuf *m;
5012
5013 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
5014 if (m == NULL) {
5015 /*
5016 * we're in trouble here. _PREPEND below will free
5017 * all the data if there is no leading space, so we
5018 * must put the data back and restore.
5019 */
5020 if (took_all) {
5021 /* unsteal the data */
5022 sp->data = chk->data;
5023 sp->tail_mbuf = chk->last_mbuf;
5024 } else {
5025 struct mbuf *m;
5026
5027 /* reassemble the data */
5028 m = sp->data;
5029 sp->data = chk->data;
5030 SCTP_BUF_NEXT(sp->data) = m;
5031 }
5032 sp->some_taken = some_taken;
5033 sp->length += to_move;
5034 chk->data = NULL;
5035 sctp_free_a_chunk(stcb, chk);
5036 SCTP_TCB_SEND_UNLOCK(stcb);
5037 goto out_gu;
5038 } else {
5039 SCTP_BUF_LEN(m) = 0;
5040 SCTP_BUF_NEXT(m) = chk->data;
5041 chk->data = m;
5042 M_ALIGN(chk->data, 4);
5043 }
5044 }
5045 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
5046 if (chk->data == NULL) {
5047 /* HELP */
5048 sctp_free_a_chunk(stcb, chk);
5049 SCTP_TCB_SEND_UNLOCK(stcb);
5050 goto out_gu;
5051 }
5052 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
5053 chk->book_size = chk->send_size = (to_move +
5054 sizeof(struct sctp_data_chunk));
5055 chk->book_size_scale = 0;
5056 chk->sent = SCTP_DATAGRAM_UNSENT;
5057
5058 /*
5059 * get last_mbuf and counts of mb useage This is ugly but hopefully
5060 * its only one mbuf.
5061 */
5062 if (chk->last_mbuf == NULL) {
5063 chk->last_mbuf = chk->data;
5064 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
5065 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
5066 }
5067 }
5068 chk->flags = 0;
5069 chk->asoc = &stcb->asoc;
5070 chk->pad_inplace = 0;
5071 chk->no_fr_allowed = 0;
5072 chk->rec.data.stream_seq = sp->strseq;
5073 chk->rec.data.stream_number = sp->stream;
5074 chk->rec.data.payloadtype = sp->ppid;
5075 chk->rec.data.context = sp->context;
5076 chk->rec.data.doing_fast_retransmit = 0;
5077 chk->rec.data.ect_nonce = 0; /* ECN Nonce */
5078
5079 chk->rec.data.timetodrop = sp->ts;
5080 chk->flags = sp->act_flags;
5081 chk->addr_over = sp->addr_over;
5082
5083 chk->whoTo = net;
5084 atomic_add_int(&chk->whoTo->ref_count, 1);
5085
5086 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
5087#ifdef SCTP_LOG_SENDING_STR
5088 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
5089 (uintptr_t) stcb, (uintptr_t) sp,
5090 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
5091 chk->rec.data.TSN_seq);
5092#endif
5093
5094 dchkh = mtod(chk->data, struct sctp_data_chunk *);
5095 /*
5096 * Put the rest of the things in place now. Size was done earlier in
5097 * previous loop prior to padding.
5098 */
5099
5100#ifdef SCTP_ASOCLOG_OF_TSNS
5101 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
5102 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
5103 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
5104 asoc->tsn_out_at++;
5105 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
5106 asoc->tsn_out_at = 0;
5107 }
5108#endif
5109
5110 dchkh->ch.chunk_type = SCTP_DATA;
5111 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
5112 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
5113 dchkh->dp.stream_id = htons(strq->stream_no);
5114 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
5115 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
5116 dchkh->ch.chunk_length = htons(chk->send_size);
5117 /* Now advance the chk->send_size by the actual pad needed. */
5118 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
5119 /* need a pad */
5120 struct mbuf *lm;
5121 int pads;
5122
5123 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
5124 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
5125 chk->pad_inplace = 1;
5126 }
5127 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
5128 /* pad added an mbuf */
5129 chk->last_mbuf = lm;
5130 }
5131 chk->send_size += pads;
5132 }
5133 /* We only re-set the policy if it is on */
5134 if (sp->pr_sctp_on) {
5135 sctp_set_prsctp_policy(stcb, sp);
5136 }
5137 if (sp->msg_is_complete && (sp->length == 0)) {
5138 /* All done pull and kill the message */
5139 asoc->stream_queue_cnt--;
5140 TAILQ_REMOVE(&strq->outqueue, sp, next);
5141 sctp_free_remote_addr(sp->net);
5142 if (sp->data) {
5143 sctp_m_freem(sp->data);
5144 sp->data = NULL;
5145 }
5146 sctp_free_a_strmoq(stcb, sp);
5147
5148 /* we can't be locked to it */
5149 *locked = 0;
5150 stcb->asoc.locked_on_sending = NULL;
5151 } else {
5152 /* more to go, we are locked */
5153 *locked = 1;
5154 }
5155 asoc->chunks_on_out_queue++;
5156 if (sp->pr_sctp_on) {
5157 asoc->pr_sctp_cnt++;
5158 chk->pr_sctp_on = 1;
5159 } else {
5160 chk->pr_sctp_on = 0;
5161 }
5162 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
5163 asoc->send_queue_cnt++;
5164 SCTP_TCB_SEND_UNLOCK(stcb);
5165 return (to_move);
5166}
5167
5168
5169static struct sctp_stream_out *
5170sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
5171{
5172 struct sctp_stream_out *strq;
5173
5174 /* Find the next stream to use */
5175 if (asoc->last_out_stream == NULL) {
5176 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
5177 if (asoc->last_out_stream == NULL) {
5178 /* huh nothing on the wheel, TSNH */
5179 return (NULL);
5180 }
5181 goto done_it;
5182 }
5183 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
5184done_it:
5185 if (strq == NULL) {
5186 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
5187 }
5188 return (strq);
5189
5190}
5191
5192static void
5193sctp_fill_outqueue(struct sctp_tcb *stcb,
5194 struct sctp_nets *net, int frag_point, int eeor_mode)
5195{
5196 struct sctp_association *asoc;
6021
6022static __inline int
6023sctp_can_we_split_this(struct sctp_tcb *stcb,
6024 struct sctp_stream_queue_pending *sp,
6025 int goal_mtu, int frag_point, int eeor_on)
6026{
6027 /*
6028 * Make a decision on if I should split a msg into multiple parts.
6029 */
6030 if (goal_mtu < sctp_min_split_point) {
6031 /* you don't want enough */
6032 return (0);
6033 }
6034 if (sp->msg_is_complete == 0) {
6035 if (eeor_on) {
6036 /*
6037 * If we are doing EEOR we need to always send it if
6038 * its the entire thing.
6039 */
6040 if (goal_mtu >= sp->length)
6041 return (sp->length);
6042 } else {
6043 if (goal_mtu >= sp->length) {
6044 /*
6045 * If we cannot fill the amount needed there
6046 * is no sense of splitting the chunk.
6047 */
6048 return (0);
6049 }
6050 }
6051 /*
6052 * If we reach here sp->length is larger than the goal_mtu.
6053 * Do we wish to split it for the sake of packet putting
6054 * together?
6055 */
6056 if (goal_mtu >= min(sctp_min_split_point, stcb->asoc.smallest_mtu)) {
6057 /* Its ok to split it */
6058 return (min(goal_mtu, frag_point));
6059 }
6060 } else {
6061 /* We can always split a complete message to make it fit */
6062 if (goal_mtu >= sp->length)
6063 /* Take it all */
6064 return (sp->length);
6065
6066 return (min(goal_mtu, frag_point));
6067 }
6068 /* Nope, can't split */
6069 return (0);
6070
6071}
6072
6073static int
6074sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
6075 struct sctp_stream_out *strq,
6076 int goal_mtu,
6077 int frag_point,
6078 int *locked,
6079 int *giveup,
6080 int eeor_mode)
6081{
6082 /* Move from the stream to the send_queue keeping track of the total */
6083 struct sctp_association *asoc;
6084 struct sctp_stream_queue_pending *sp;
6085 struct sctp_tmit_chunk *chk;
6086 struct sctp_data_chunk *dchkh;
6087 int to_move;
6088 uint8_t rcv_flags = 0;
6089 uint8_t some_taken;
6090 uint8_t took_all = 0;
6091
6092 SCTP_TCB_LOCK_ASSERT(stcb);
6093 asoc = &stcb->asoc;
6094 sp = TAILQ_FIRST(&strq->outqueue);
6095 if (sp == NULL) {
6096 *locked = 0;
6097 SCTP_TCB_SEND_LOCK(stcb);
6098 if (strq->last_msg_incomplete) {
6099 printf("Huh? Stream:%d lm_in_c=%d but queue is NULL\n",
6100 strq->stream_no, strq->last_msg_incomplete);
6101 strq->last_msg_incomplete = 0;
6102 }
6103 SCTP_TCB_SEND_UNLOCK(stcb);
6104 return (0);
6105 }
6106 SCTP_TCB_SEND_LOCK(stcb);
6107 if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
6108 /* Must wait for more data, must be last msg */
6109 *locked = 1;
6110 *giveup = 1;
6111 SCTP_TCB_SEND_UNLOCK(stcb);
6112 return (0);
6113 } else if (sp->length == 0) {
6114 /* This should not happen */
6115 panic("sp length is 0?");
6116 }
6117 some_taken = sp->some_taken;
6118 if ((goal_mtu >= sp->length) && (sp->msg_is_complete)) {
6119 /* It all fits and its a complete msg, no brainer */
6120 to_move = min(sp->length, frag_point);
6121 if (to_move == sp->length) {
6122 /* Getting it all */
6123 if (sp->some_taken) {
6124 rcv_flags |= SCTP_DATA_LAST_FRAG;
6125 } else {
6126 rcv_flags |= SCTP_DATA_NOT_FRAG;
6127 }
6128 } else {
6129 /* Not getting it all, frag point overrides */
6130 if (sp->some_taken == 0) {
6131 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6132 }
6133 sp->some_taken = 1;
6134 }
6135 } else {
6136 to_move = sctp_can_we_split_this(stcb, sp, goal_mtu,
6137 frag_point, eeor_mode);
6138 if (to_move) {
6139 if (to_move >= sp->length) {
6140 to_move = sp->length;
6141 }
6142 if (sp->some_taken == 0) {
6143 rcv_flags |= SCTP_DATA_FIRST_FRAG;
6144 }
6145 sp->some_taken = 1;
6146 } else {
6147 if (sp->some_taken) {
6148 *locked = 1;
6149 }
6150 *giveup = 1;
6151 SCTP_TCB_SEND_UNLOCK(stcb);
6152 return (0);
6153 }
6154 }
6155 SCTP_TCB_SEND_UNLOCK(stcb);
6156 /* If we reach here, we can copy out a chunk */
6157 sctp_alloc_a_chunk(stcb, chk);
6158 if (chk == NULL) {
6159 /* No chunk memory */
6160out_gu:
6161 *giveup = 1;
6162 return (0);
6163 }
6164 /*
6165 * Setup for unordered if needed by looking at the user sent info
6166 * flags.
6167 */
6168 if (sp->sinfo_flags & SCTP_UNORDERED) {
6169 rcv_flags |= SCTP_DATA_UNORDERED;
6170 }
6171 /* clear out the chunk before setting up */
6172 memset(chk, sizeof(*chk), 0);
6173 chk->rec.data.rcv_flags = rcv_flags;
6174 SCTP_TCB_SEND_LOCK(stcb);
6175 if (SCTP_BUF_IS_EXTENDED(sp->data)) {
6176 chk->copy_by_ref = 1;
6177 } else {
6178 chk->copy_by_ref = 0;
6179 }
6180 if (to_move >= sp->length) {
6181 /* we can steal the whole thing */
6182 chk->data = sp->data;
6183 chk->last_mbuf = sp->tail_mbuf;
6184 /* register the stealing */
6185 sp->data = sp->tail_mbuf = NULL;
6186 took_all = 1;
6187 } else {
6188 struct mbuf *m;
6189
6190 chk->data = SCTP_M_COPYM(sp->data, 0, to_move, M_DONTWAIT);
6191 chk->last_mbuf = NULL;
6192 if (chk->data == NULL) {
6193 sp->some_taken = some_taken;
6194 sctp_free_a_chunk(stcb, chk);
6195 SCTP_TCB_SEND_UNLOCK(stcb);
6196 goto out_gu;
6197 }
6198 /* Pull off the data */
6199 m_adj(sp->data, to_move);
6200 /* Now lets work our way down and compact it */
6201 m = sp->data;
6202 while (m && (SCTP_BUF_LEN(m) == 0)) {
6203 sp->data = SCTP_BUF_NEXT(m);
6204 SCTP_BUF_NEXT(m) = NULL;
6205 if (sp->tail_mbuf == m) {
6206 /* freeing tail */
6207 sp->tail_mbuf = sp->data;
6208 }
6209 sctp_m_free(m);
6210 m = sp->data;
6211 }
6212 }
6213 if (to_move > sp->length) {
6214 panic("Huh, how can to_move be larger?");
6215 } else {
6216 sp->length -= to_move;
6217 }
6218
6219 if (M_LEADINGSPACE(chk->data) < sizeof(struct sctp_data_chunk)) {
6220 /* Not enough room for a chunk header, get some */
6221 struct mbuf *m;
6222
6223 m = sctp_get_mbuf_for_msg(1, 0, M_DONTWAIT, 0, MT_DATA);
6224 if (m == NULL) {
6225 /*
6226 * we're in trouble here. _PREPEND below will free
6227 * all the data if there is no leading space, so we
6228 * must put the data back and restore.
6229 */
6230 if (took_all) {
6231 /* unsteal the data */
6232 sp->data = chk->data;
6233 sp->tail_mbuf = chk->last_mbuf;
6234 } else {
6235 struct mbuf *m;
6236
6237 /* reassemble the data */
6238 m = sp->data;
6239 sp->data = chk->data;
6240 SCTP_BUF_NEXT(sp->data) = m;
6241 }
6242 sp->some_taken = some_taken;
6243 sp->length += to_move;
6244 chk->data = NULL;
6245 sctp_free_a_chunk(stcb, chk);
6246 SCTP_TCB_SEND_UNLOCK(stcb);
6247 goto out_gu;
6248 } else {
6249 SCTP_BUF_LEN(m) = 0;
6250 SCTP_BUF_NEXT(m) = chk->data;
6251 chk->data = m;
6252 M_ALIGN(chk->data, 4);
6253 }
6254 }
6255 SCTP_BUF_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
6256 if (chk->data == NULL) {
6257 /* HELP */
6258 sctp_free_a_chunk(stcb, chk);
6259 SCTP_TCB_SEND_UNLOCK(stcb);
6260 goto out_gu;
6261 }
6262 sctp_snd_sb_alloc(stcb, sizeof(struct sctp_data_chunk));
6263 chk->book_size = chk->send_size = (to_move +
6264 sizeof(struct sctp_data_chunk));
6265 chk->book_size_scale = 0;
6266 chk->sent = SCTP_DATAGRAM_UNSENT;
6267
6268 /*
6269 * get last_mbuf and counts of mb useage This is ugly but hopefully
6270 * its only one mbuf.
6271 */
6272 if (chk->last_mbuf == NULL) {
6273 chk->last_mbuf = chk->data;
6274 while (SCTP_BUF_NEXT(chk->last_mbuf) != NULL) {
6275 chk->last_mbuf = SCTP_BUF_NEXT(chk->last_mbuf);
6276 }
6277 }
6278 chk->flags = 0;
6279 chk->asoc = &stcb->asoc;
6280 chk->pad_inplace = 0;
6281 chk->no_fr_allowed = 0;
6282 chk->rec.data.stream_seq = sp->strseq;
6283 chk->rec.data.stream_number = sp->stream;
6284 chk->rec.data.payloadtype = sp->ppid;
6285 chk->rec.data.context = sp->context;
6286 chk->rec.data.doing_fast_retransmit = 0;
6287 chk->rec.data.ect_nonce = 0; /* ECN Nonce */
6288
6289 chk->rec.data.timetodrop = sp->ts;
6290 chk->flags = sp->act_flags;
6291 chk->addr_over = sp->addr_over;
6292
6293 chk->whoTo = net;
6294 atomic_add_int(&chk->whoTo->ref_count, 1);
6295
6296 chk->rec.data.TSN_seq = atomic_fetchadd_int(&asoc->sending_seq, 1);
6297#ifdef SCTP_LOG_SENDING_STR
6298 sctp_misc_ints(SCTP_STRMOUT_LOG_SEND,
6299 (uintptr_t) stcb, (uintptr_t) sp,
6300 (uint32_t) ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq),
6301 chk->rec.data.TSN_seq);
6302#endif
6303
6304 dchkh = mtod(chk->data, struct sctp_data_chunk *);
6305 /*
6306 * Put the rest of the things in place now. Size was done earlier in
6307 * previous loop prior to padding.
6308 */
6309
6310#ifdef SCTP_ASOCLOG_OF_TSNS
6311 asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
6312 asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
6313 asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
6314 asoc->tsn_out_at++;
6315 if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
6316 asoc->tsn_out_at = 0;
6317 }
6318#endif
6319
6320 dchkh->ch.chunk_type = SCTP_DATA;
6321 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
6322 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
6323 dchkh->dp.stream_id = htons(strq->stream_no);
6324 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
6325 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
6326 dchkh->ch.chunk_length = htons(chk->send_size);
6327 /* Now advance the chk->send_size by the actual pad needed. */
6328 if (chk->send_size < SCTP_SIZE32(chk->book_size)) {
6329 /* need a pad */
6330 struct mbuf *lm;
6331 int pads;
6332
6333 pads = SCTP_SIZE32(chk->book_size) - chk->send_size;
6334 if (sctp_pad_lastmbuf(chk->data, pads, chk->last_mbuf) == 0) {
6335 chk->pad_inplace = 1;
6336 }
6337 if ((lm = SCTP_BUF_NEXT(chk->last_mbuf)) != NULL) {
6338 /* pad added an mbuf */
6339 chk->last_mbuf = lm;
6340 }
6341 chk->send_size += pads;
6342 }
6343 /* We only re-set the policy if it is on */
6344 if (sp->pr_sctp_on) {
6345 sctp_set_prsctp_policy(stcb, sp);
6346 }
6347 if (sp->msg_is_complete && (sp->length == 0)) {
6348 /* All done pull and kill the message */
6349 asoc->stream_queue_cnt--;
6350 TAILQ_REMOVE(&strq->outqueue, sp, next);
6351 sctp_free_remote_addr(sp->net);
6352 if (sp->data) {
6353 sctp_m_freem(sp->data);
6354 sp->data = NULL;
6355 }
6356 sctp_free_a_strmoq(stcb, sp);
6357
6358 /* we can't be locked to it */
6359 *locked = 0;
6360 stcb->asoc.locked_on_sending = NULL;
6361 } else {
6362 /* more to go, we are locked */
6363 *locked = 1;
6364 }
6365 asoc->chunks_on_out_queue++;
6366 if (sp->pr_sctp_on) {
6367 asoc->pr_sctp_cnt++;
6368 chk->pr_sctp_on = 1;
6369 } else {
6370 chk->pr_sctp_on = 0;
6371 }
6372 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
6373 asoc->send_queue_cnt++;
6374 SCTP_TCB_SEND_UNLOCK(stcb);
6375 return (to_move);
6376}
6377
6378
6379static struct sctp_stream_out *
6380sctp_select_a_stream(struct sctp_tcb *stcb, struct sctp_association *asoc)
6381{
6382 struct sctp_stream_out *strq;
6383
6384 /* Find the next stream to use */
6385 if (asoc->last_out_stream == NULL) {
6386 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
6387 if (asoc->last_out_stream == NULL) {
6388 /* huh nothing on the wheel, TSNH */
6389 return (NULL);
6390 }
6391 goto done_it;
6392 }
6393 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
6394done_it:
6395 if (strq == NULL) {
6396 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
6397 }
6398 return (strq);
6399
6400}
6401
6402static void
6403sctp_fill_outqueue(struct sctp_tcb *stcb,
6404 struct sctp_nets *net, int frag_point, int eeor_mode)
6405{
6406 struct sctp_association *asoc;
5197 struct sctp_stream_out *strq, *strqn;
6407 struct sctp_stream_out *strq, *strqn, *strqt;
5198 int goal_mtu, moved_how_much, total_moved = 0;
5199 int locked, giveup;
5200 struct sctp_stream_queue_pending *sp;
5201
5202 SCTP_TCB_LOCK_ASSERT(stcb);
5203 asoc = &stcb->asoc;
6408 int goal_mtu, moved_how_much, total_moved = 0;
6409 int locked, giveup;
6410 struct sctp_stream_queue_pending *sp;
6411
6412 SCTP_TCB_LOCK_ASSERT(stcb);
6413 asoc = &stcb->asoc;
5204#ifdef AF_INET6
6414#ifdef INET6
5205 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
5206 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
5207 } else {
5208 /* ?? not sure what else to do */
5209 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
5210 }
5211#else
5212 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
5213 mtu_fromwheel = 0;
5214#endif
5215 /* Need an allowance for the data chunk header too */
5216 goal_mtu -= sizeof(struct sctp_data_chunk);
5217
5218 /* must make even word boundary */
5219 goal_mtu &= 0xfffffffc;
5220 if (asoc->locked_on_sending) {
5221 /* We are stuck on one stream until the message completes. */
5222 strqn = strq = asoc->locked_on_sending;
5223 locked = 1;
5224 } else {
5225 strqn = strq = sctp_select_a_stream(stcb, asoc);
5226 locked = 0;
5227 }
5228
5229 while ((goal_mtu > 0) && strq) {
5230 sp = TAILQ_FIRST(&strq->outqueue);
5231 /*
5232 * If CMT is off, we must validate that the stream in
5233 * question has the first item pointed towards are network
5234 * destionation requested by the caller. Note that if we
5235 * turn out to be locked to a stream (assigning TSN's then
5236 * we must stop, since we cannot look for another stream
5237 * with data to send to that destination). In CMT's case, by
5238 * skipping this check, we will send one data packet towards
5239 * the requested net.
5240 */
5241 if (sp == NULL) {
5242 break;
5243 }
5244 if ((sp->net != net) && (sctp_cmt_on_off == 0)) {
5245 /* none for this network */
5246 if (locked) {
5247 break;
5248 } else {
5249 strq = sctp_select_a_stream(stcb, asoc);
5250 if (strq == NULL)
5251 /* none left */
5252 break;
5253 if (strqn == strq) {
5254 /* I have circled */
5255 break;
5256 }
5257 continue;
5258 }
5259 }
5260 giveup = 0;
5261 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
5262 &giveup, eeor_mode);
5263 asoc->last_out_stream = strq;
5264 if (locked) {
5265 asoc->locked_on_sending = strq;
5266 if ((moved_how_much == 0) || (giveup))
5267 /* no more to move for now */
5268 break;
5269 } else {
5270 asoc->locked_on_sending = NULL;
6415 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
6416 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
6417 } else {
6418 /* ?? not sure what else to do */
6419 goal_mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
6420 }
6421#else
6422 goal_mtu = net->mtu - SCTP_MIN_OVERHEAD;
6423 mtu_fromwheel = 0;
6424#endif
6425 /* Need an allowance for the data chunk header too */
6426 goal_mtu -= sizeof(struct sctp_data_chunk);
6427
6428 /* must make even word boundary */
6429 goal_mtu &= 0xfffffffc;
6430 if (asoc->locked_on_sending) {
6431 /* We are stuck on one stream until the message completes. */
6432 strqn = strq = asoc->locked_on_sending;
6433 locked = 1;
6434 } else {
6435 strqn = strq = sctp_select_a_stream(stcb, asoc);
6436 locked = 0;
6437 }
6438
6439 while ((goal_mtu > 0) && strq) {
6440 sp = TAILQ_FIRST(&strq->outqueue);
6441 /*
6442 * If CMT is off, we must validate that the stream in
6443 * question has the first item pointed towards are network
6444 * destionation requested by the caller. Note that if we
6445 * turn out to be locked to a stream (assigning TSN's then
6446 * we must stop, since we cannot look for another stream
6447 * with data to send to that destination). In CMT's case, by
6448 * skipping this check, we will send one data packet towards
6449 * the requested net.
6450 */
6451 if (sp == NULL) {
6452 break;
6453 }
6454 if ((sp->net != net) && (sctp_cmt_on_off == 0)) {
6455 /* none for this network */
6456 if (locked) {
6457 break;
6458 } else {
6459 strq = sctp_select_a_stream(stcb, asoc);
6460 if (strq == NULL)
6461 /* none left */
6462 break;
6463 if (strqn == strq) {
6464 /* I have circled */
6465 break;
6466 }
6467 continue;
6468 }
6469 }
6470 giveup = 0;
6471 moved_how_much = sctp_move_to_outqueue(stcb, net, strq, goal_mtu, frag_point, &locked,
6472 &giveup, eeor_mode);
6473 asoc->last_out_stream = strq;
6474 if (locked) {
6475 asoc->locked_on_sending = strq;
6476 if ((moved_how_much == 0) || (giveup))
6477 /* no more to move for now */
6478 break;
6479 } else {
6480 asoc->locked_on_sending = NULL;
6481 strqt = sctp_select_a_stream(stcb, asoc);
5271 if (TAILQ_FIRST(&strq->outqueue) == NULL) {
5272 sctp_remove_from_wheel(stcb, asoc, strq);
5273 }
5274 if (giveup) {
5275 break;
5276 }
6482 if (TAILQ_FIRST(&strq->outqueue) == NULL) {
6483 sctp_remove_from_wheel(stcb, asoc, strq);
6484 }
6485 if (giveup) {
6486 break;
6487 }
5277 strq = sctp_select_a_stream(stcb, asoc);
6488 strq = strqt;
5278 if (strq == NULL) {
5279 break;
5280 }
5281 }
5282 total_moved += moved_how_much;
5283 goal_mtu -= moved_how_much;
5284 goal_mtu &= 0xfffffffc;
5285 }
5286 if (total_moved == 0) {
5287 if ((sctp_cmt_on_off == 0) &&
5288 (net == stcb->asoc.primary_destination)) {
5289 /* ran dry for primary network net */
5290 SCTP_STAT_INCR(sctps_primary_randry);
5291 } else if (sctp_cmt_on_off) {
5292 /* ran dry with CMT on */
5293 SCTP_STAT_INCR(sctps_cmt_randry);
5294 }
5295 }
5296}
5297
5298__inline void
5299sctp_fix_ecn_echo(struct sctp_association *asoc)
5300{
5301 struct sctp_tmit_chunk *chk;
5302
5303 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
5304 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
5305 chk->sent = SCTP_DATAGRAM_UNSENT;
5306 }
5307 }
5308}
5309
5310static void
5311sctp_move_to_an_alt(struct sctp_tcb *stcb,
5312 struct sctp_association *asoc,
5313 struct sctp_nets *net)
5314{
5315 struct sctp_tmit_chunk *chk;
5316 struct sctp_nets *a_net;
5317
5318 SCTP_TCB_LOCK_ASSERT(stcb);
5319 a_net = sctp_find_alternate_net(stcb, net, 0);
5320 if ((a_net != net) &&
5321 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
5322 /*
5323 * We only proceed if a valid alternate is found that is not
5324 * this one and is reachable. Here we must move all chunks
5325 * queued in the send queue off of the destination address
5326 * to our alternate.
5327 */
5328 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
5329 if (chk->whoTo == net) {
5330 /* Move the chunk to our alternate */
5331 sctp_free_remote_addr(chk->whoTo);
5332 chk->whoTo = a_net;
5333 atomic_add_int(&a_net->ref_count, 1);
5334 }
5335 }
5336 }
5337}
5338
6489 if (strq == NULL) {
6490 break;
6491 }
6492 }
6493 total_moved += moved_how_much;
6494 goal_mtu -= moved_how_much;
6495 goal_mtu &= 0xfffffffc;
6496 }
6497 if (total_moved == 0) {
6498 if ((sctp_cmt_on_off == 0) &&
6499 (net == stcb->asoc.primary_destination)) {
6500 /* ran dry for primary network net */
6501 SCTP_STAT_INCR(sctps_primary_randry);
6502 } else if (sctp_cmt_on_off) {
6503 /* ran dry with CMT on */
6504 SCTP_STAT_INCR(sctps_cmt_randry);
6505 }
6506 }
6507}
6508
6509__inline void
6510sctp_fix_ecn_echo(struct sctp_association *asoc)
6511{
6512 struct sctp_tmit_chunk *chk;
6513
6514 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
6515 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
6516 chk->sent = SCTP_DATAGRAM_UNSENT;
6517 }
6518 }
6519}
6520
6521static void
6522sctp_move_to_an_alt(struct sctp_tcb *stcb,
6523 struct sctp_association *asoc,
6524 struct sctp_nets *net)
6525{
6526 struct sctp_tmit_chunk *chk;
6527 struct sctp_nets *a_net;
6528
6529 SCTP_TCB_LOCK_ASSERT(stcb);
6530 a_net = sctp_find_alternate_net(stcb, net, 0);
6531 if ((a_net != net) &&
6532 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
6533 /*
6534 * We only proceed if a valid alternate is found that is not
6535 * this one and is reachable. Here we must move all chunks
6536 * queued in the send queue off of the destination address
6537 * to our alternate.
6538 */
6539 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
6540 if (chk->whoTo == net) {
6541 /* Move the chunk to our alternate */
6542 sctp_free_remote_addr(chk->whoTo);
6543 chk->whoTo = a_net;
6544 atomic_add_int(&a_net->ref_count, 1);
6545 }
6546 }
6547 }
6548}
6549
5339extern int sctp_early_fr;
5340
5341int
5342sctp_med_chunk_output(struct sctp_inpcb *inp,
5343 struct sctp_tcb *stcb,
5344 struct sctp_association *asoc,
5345 int *num_out,
5346 int *reason_code,
5347 int control_only, int *cwnd_full, int from_where,
5348 struct timeval *now, int *now_filled, int frag_point)
5349{
5350 /*
5351 * Ok this is the generic chunk service queue. we must do the
5352 * following: - Service the stream queue that is next, moving any
5353 * message (note I must get a complete message i.e. FIRST/MIDDLE and
5354 * LAST to the out queue in one pass) and assigning TSN's - Check to
5355 * see if the cwnd/rwnd allows any output, if so we go ahead and
5356 * fomulate and send the low level chunks. Making sure to combine
5357 * any control in the control chunk queue also.
5358 */
5359 struct sctp_nets *net;
5360 struct mbuf *outchain, *endoutchain;
5361 struct sctp_tmit_chunk *chk, *nchk;
5362 struct sctphdr *shdr;
5363
5364 /* temp arrays for unlinking */
5365 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
5366 int no_fragmentflg, error;
6550int
6551sctp_med_chunk_output(struct sctp_inpcb *inp,
6552 struct sctp_tcb *stcb,
6553 struct sctp_association *asoc,
6554 int *num_out,
6555 int *reason_code,
6556 int control_only, int *cwnd_full, int from_where,
6557 struct timeval *now, int *now_filled, int frag_point)
6558{
6559 /*
6560 * Ok this is the generic chunk service queue. we must do the
6561 * following: - Service the stream queue that is next, moving any
6562 * message (note I must get a complete message i.e. FIRST/MIDDLE and
6563 * LAST to the out queue in one pass) and assigning TSN's - Check to
6564 * see if the cwnd/rwnd allows any output, if so we go ahead and
6565 * fomulate and send the low level chunks. Making sure to combine
6566 * any control in the control chunk queue also.
6567 */
6568 struct sctp_nets *net;
6569 struct mbuf *outchain, *endoutchain;
6570 struct sctp_tmit_chunk *chk, *nchk;
6571 struct sctphdr *shdr;
6572
6573 /* temp arrays for unlinking */
6574 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
6575 int no_fragmentflg, error;
5367 int one_chunk, hbflag;
6576 int one_chunk, hbflag, skip_data_for_this_net;
5368 int asconf, cookie, no_out_cnt;
5369 int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode;
5370 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
5371 struct sctp_nets *start_at, *old_startat = NULL, *send_start_at;
5372 int tsns_sent = 0;
5373 uint32_t auth_offset = 0;
5374 struct sctp_auth_chunk *auth = NULL;
5375
5376 *num_out = 0;
5377 cwnd_full_ind = 0;
5378
5379 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
5380 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
5381 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
5382 eeor_mode = 1;
5383 } else {
5384 eeor_mode = 0;
5385 }
5386 ctl_cnt = no_out_cnt = asconf = cookie = 0;
5387 /*
5388 * First lets prime the pump. For each destination, if there is room
5389 * in the flight size, attempt to pull an MTU's worth out of the
5390 * stream queues into the general send_queue
5391 */
5392#ifdef SCTP_AUDITING_ENABLED
5393 sctp_audit_log(0xC2, 2);
5394#endif
5395 SCTP_TCB_LOCK_ASSERT(stcb);
5396 hbflag = 0;
5397 if ((control_only) || (asoc->stream_reset_outstanding))
5398 no_data_chunks = 1;
5399 else
5400 no_data_chunks = 0;
5401
5402 /* Nothing to possible to send? */
5403 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
5404 TAILQ_EMPTY(&asoc->send_queue) &&
5405 TAILQ_EMPTY(&asoc->out_wheel)) {
5406 *reason_code = 9;
5407 return (0);
5408 }
5409 if (asoc->peers_rwnd == 0) {
5410 /* No room in peers rwnd */
5411 *cwnd_full = 1;
5412 *reason_code = 1;
5413 if (asoc->total_flight > 0) {
5414 /* we are allowed one chunk in flight */
5415 no_data_chunks = 1;
5416 }
5417 }
5418 if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
5419 if (sctp_cmt_on_off) {
5420 /*
5421 * for CMT we start at the next one past the one we
5422 * last added data to.
5423 */
5424 if (TAILQ_FIRST(&asoc->send_queue) != NULL) {
5425 goto skip_the_fill_from_streams;
5426 }
5427 if (asoc->last_net_data_came_from) {
5428 net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next);
5429 if (net == NULL) {
5430 net = TAILQ_FIRST(&asoc->nets);
5431 }
5432 } else {
5433 /* back to start */
5434 net = TAILQ_FIRST(&asoc->nets);
5435 }
5436
5437 } else {
5438 net = asoc->primary_destination;
5439 if (net == NULL) {
5440 /* TSNH */
5441 net = TAILQ_FIRST(&asoc->nets);
5442 }
5443 }
5444 start_at = net;
5445one_more_time:
5446 for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
5447 if (old_startat && (old_startat == net)) {
5448 break;
5449 }
5450 if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
5451 /* nothing can be in queue for this guy */
5452 continue;
5453 }
5454 if (net->flight_size >= net->cwnd) {
5455 /* skip this network, no room */
5456 cwnd_full_ind++;
5457 continue;
5458 }
5459 /*
5460 * @@@ JRI : this for loop we are in takes in each
5461 * net, if its's got space in cwnd and has data sent
5462 * to it (when CMT is off) then it calls
5463 * sctp_fill_outqueue for the net. This gets data on
5464 * the send queue for that network.
5465 *
5466 * In sctp_fill_outqueue TSN's are assigned and data is
5467 * copied out of the stream buffers. Note mostly
5468 * copy by reference (we hope).
5469 */
5470#ifdef SCTP_CWND_LOGGING
5471 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
5472#endif
5473 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode);
5474 }
5475 if (start_at != TAILQ_FIRST(&asoc->nets)) {
5476 /* got to pick up the beginning stuff. */
5477 old_startat = start_at;
5478 start_at = net = TAILQ_FIRST(&asoc->nets);
5479 goto one_more_time;
5480 }
5481 }
5482skip_the_fill_from_streams:
5483 *cwnd_full = cwnd_full_ind;
5484 /* now service each destination and send out what we can for it */
5485 /* Nothing to send? */
5486 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
5487 (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
5488 *reason_code = 8;
5489 return (0);
5490 }
5491 chk = TAILQ_FIRST(&asoc->send_queue);
5492 if (chk) {
5493 send_start_at = chk->whoTo;
5494 } else {
5495 send_start_at = TAILQ_FIRST(&asoc->nets);
5496 }
5497 old_startat = NULL;
5498again_one_more_time:
5499 for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
5500 /* how much can we send? */
5501 /* printf("Examine for sending net:%x\n", (uint32_t)net); */
5502 if (old_startat && (old_startat == net)) {
5503 /* through list ocmpletely. */
5504 break;
5505 }
5506 tsns_sent = 0;
5507 if (net->ref_count < 2) {
5508 /*
5509 * Ref-count of 1 so we cannot have data or control
5510 * queued to this address. Skip it.
5511 */
5512 continue;
5513 }
5514 ctl_cnt = bundle_at = 0;
5515 endoutchain = outchain = NULL;
5516 no_fragmentflg = 1;
5517 one_chunk = 0;
6577 int asconf, cookie, no_out_cnt;
6578 int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind, eeor_mode;
6579 unsigned int mtu, r_mtu, omtu, mx_mtu, to_out;
6580 struct sctp_nets *start_at, *old_startat = NULL, *send_start_at;
6581 int tsns_sent = 0;
6582 uint32_t auth_offset = 0;
6583 struct sctp_auth_chunk *auth = NULL;
6584
6585 *num_out = 0;
6586 cwnd_full_ind = 0;
6587
6588 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
6589 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED) ||
6590 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR))) {
6591 eeor_mode = 1;
6592 } else {
6593 eeor_mode = 0;
6594 }
6595 ctl_cnt = no_out_cnt = asconf = cookie = 0;
6596 /*
6597 * First lets prime the pump. For each destination, if there is room
6598 * in the flight size, attempt to pull an MTU's worth out of the
6599 * stream queues into the general send_queue
6600 */
6601#ifdef SCTP_AUDITING_ENABLED
6602 sctp_audit_log(0xC2, 2);
6603#endif
6604 SCTP_TCB_LOCK_ASSERT(stcb);
6605 hbflag = 0;
6606 if ((control_only) || (asoc->stream_reset_outstanding))
6607 no_data_chunks = 1;
6608 else
6609 no_data_chunks = 0;
6610
6611 /* Nothing to possible to send? */
6612 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
6613 TAILQ_EMPTY(&asoc->send_queue) &&
6614 TAILQ_EMPTY(&asoc->out_wheel)) {
6615 *reason_code = 9;
6616 return (0);
6617 }
6618 if (asoc->peers_rwnd == 0) {
6619 /* No room in peers rwnd */
6620 *cwnd_full = 1;
6621 *reason_code = 1;
6622 if (asoc->total_flight > 0) {
6623 /* we are allowed one chunk in flight */
6624 no_data_chunks = 1;
6625 }
6626 }
6627 if ((no_data_chunks == 0) && (!TAILQ_EMPTY(&asoc->out_wheel))) {
6628 if (sctp_cmt_on_off) {
6629 /*
6630 * for CMT we start at the next one past the one we
6631 * last added data to.
6632 */
6633 if (TAILQ_FIRST(&asoc->send_queue) != NULL) {
6634 goto skip_the_fill_from_streams;
6635 }
6636 if (asoc->last_net_data_came_from) {
6637 net = TAILQ_NEXT(asoc->last_net_data_came_from, sctp_next);
6638 if (net == NULL) {
6639 net = TAILQ_FIRST(&asoc->nets);
6640 }
6641 } else {
6642 /* back to start */
6643 net = TAILQ_FIRST(&asoc->nets);
6644 }
6645
6646 } else {
6647 net = asoc->primary_destination;
6648 if (net == NULL) {
6649 /* TSNH */
6650 net = TAILQ_FIRST(&asoc->nets);
6651 }
6652 }
6653 start_at = net;
6654one_more_time:
6655 for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
6656 if (old_startat && (old_startat == net)) {
6657 break;
6658 }
6659 if ((sctp_cmt_on_off == 0) && (net->ref_count < 2)) {
6660 /* nothing can be in queue for this guy */
6661 continue;
6662 }
6663 if (net->flight_size >= net->cwnd) {
6664 /* skip this network, no room */
6665 cwnd_full_ind++;
6666 continue;
6667 }
6668 /*
6669 * @@@ JRI : this for loop we are in takes in each
6670 * net, if its's got space in cwnd and has data sent
6671 * to it (when CMT is off) then it calls
6672 * sctp_fill_outqueue for the net. This gets data on
6673 * the send queue for that network.
6674 *
6675 * In sctp_fill_outqueue TSN's are assigned and data is
6676 * copied out of the stream buffers. Note mostly
6677 * copy by reference (we hope).
6678 */
6679#ifdef SCTP_CWND_LOGGING
6680 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FILL_OUTQ_CALLED);
6681#endif
6682 sctp_fill_outqueue(stcb, net, frag_point, eeor_mode);
6683 }
6684 if (start_at != TAILQ_FIRST(&asoc->nets)) {
6685 /* got to pick up the beginning stuff. */
6686 old_startat = start_at;
6687 start_at = net = TAILQ_FIRST(&asoc->nets);
6688 goto one_more_time;
6689 }
6690 }
6691skip_the_fill_from_streams:
6692 *cwnd_full = cwnd_full_ind;
6693 /* now service each destination and send out what we can for it */
6694 /* Nothing to send? */
6695 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
6696 (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
6697 *reason_code = 8;
6698 return (0);
6699 }
6700 chk = TAILQ_FIRST(&asoc->send_queue);
6701 if (chk) {
6702 send_start_at = chk->whoTo;
6703 } else {
6704 send_start_at = TAILQ_FIRST(&asoc->nets);
6705 }
6706 old_startat = NULL;
6707again_one_more_time:
6708 for (net = send_start_at; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
6709 /* how much can we send? */
6710 /* printf("Examine for sending net:%x\n", (uint32_t)net); */
6711 if (old_startat && (old_startat == net)) {
6712 /* through list ocmpletely. */
6713 break;
6714 }
6715 tsns_sent = 0;
6716 if (net->ref_count < 2) {
6717 /*
6718 * Ref-count of 1 so we cannot have data or control
6719 * queued to this address. Skip it.
6720 */
6721 continue;
6722 }
6723 ctl_cnt = bundle_at = 0;
6724 endoutchain = outchain = NULL;
6725 no_fragmentflg = 1;
6726 one_chunk = 0;
5518
6727 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
6728 skip_data_for_this_net = 1;
6729 } else {
6730 skip_data_for_this_net = 0;
6731 }
5519 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
5520 /*
5521 * if we have a route and an ifp check to see if we
5522 * have room to send to this guy
5523 */
5524 struct ifnet *ifp;
5525
5526 ifp = net->ro.ro_rt->rt_ifp;
5527 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
5528 SCTP_STAT_INCR(sctps_ifnomemqueued);
5529#ifdef SCTP_LOG_MAXBURST
5530 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
5531#endif
5532 continue;
5533 }
5534 }
5535 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
5536 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
5537 } else {
5538 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
5539 }
5540 mx_mtu = mtu;
5541 to_out = 0;
5542 if (mtu > asoc->peers_rwnd) {
5543 if (asoc->total_flight > 0) {
5544 /* We have a packet in flight somewhere */
5545 r_mtu = asoc->peers_rwnd;
5546 } else {
5547 /* We are always allowed to send one MTU out */
5548 one_chunk = 1;
5549 r_mtu = mtu;
5550 }
5551 } else {
5552 r_mtu = mtu;
5553 }
5554 /************************/
5555 /* Control transmission */
5556 /************************/
5557 /* Now first lets go through the control queue */
5558 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
5559 chk; chk = nchk) {
5560 nchk = TAILQ_NEXT(chk, sctp_next);
5561 if (chk->whoTo != net) {
5562 /*
5563 * No, not sent to the network we are
5564 * looking at
5565 */
5566 continue;
5567 }
5568 if (chk->data == NULL) {
5569 continue;
5570 }
5571 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
5572 /*
5573 * It must be unsent. Cookies and ASCONF's
5574 * hang around but there timers will force
5575 * when marked for resend.
5576 */
5577 continue;
5578 }
5579 /*
5580 * if no AUTH is yet included and this chunk
5581 * requires it, make sure to account for it. We
5582 * don't apply the size until the AUTH chunk is
5583 * actually added below in case there is no room for
5584 * this chunk. NOTE: we overload the use of "omtu"
5585 * here
5586 */
5587 if ((auth == NULL) &&
5588 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
5589 stcb->asoc.peer_auth_chunks)) {
5590 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
5591 } else
5592 omtu = 0;
5593 /* Here we do NOT factor the r_mtu */
5594 if ((chk->send_size < (int)(mtu - omtu)) ||
5595 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
5596 /*
5597 * We probably should glom the mbuf chain
5598 * from the chk->data for control but the
5599 * problem is it becomes yet one more level
5600 * of tracking to do if for some reason
5601 * output fails. Then I have got to
5602 * reconstruct the merged control chain.. el
5603 * yucko.. for now we take the easy way and
5604 * do the copy
5605 */
5606 /*
5607 * Add an AUTH chunk, if chunk requires it
5608 * save the offset into the chain for AUTH
5609 */
5610 if ((auth == NULL) &&
5611 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
5612 stcb->asoc.peer_auth_chunks))) {
5613 outchain = sctp_add_auth_chunk(outchain,
5614 &endoutchain,
5615 &auth,
5616 &auth_offset,
5617 stcb,
5618 chk->rec.chunk_id.id);
5619 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5620 }
5621 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
5622 (int)chk->rec.chunk_id.can_take_data,
5623 chk->send_size, chk->copy_by_ref);
5624 if (outchain == NULL) {
5625 *reason_code = 8;
5626 return (ENOMEM);
5627 }
5628 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5629 /* update our MTU size */
5630 if (mtu > (chk->send_size + omtu))
5631 mtu -= (chk->send_size + omtu);
5632 else
5633 mtu = 0;
5634 to_out += (chk->send_size + omtu);
5635 /* Do clear IP_DF ? */
5636 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
5637 no_fragmentflg = 0;
5638 }
5639 if (chk->rec.chunk_id.can_take_data)
5640 chk->data = NULL;
5641 /* Mark things to be removed, if needed */
5642 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
5643 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
5644 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
5645 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
5646 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
5647 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
5648 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
5649 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
5650 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
5651 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
5652
5653 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST)
5654 hbflag = 1;
5655 /* remove these chunks at the end */
5656 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
5657 /* turn off the timer */
5658 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
5659 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
5660 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
5661 }
5662 }
5663 ctl_cnt++;
5664 } else {
5665 /*
5666 * Other chunks, since they have
5667 * timers running (i.e. COOKIE or
5668 * ASCONF) we just "trust" that it
5669 * gets sent or retransmitted.
5670 */
5671 ctl_cnt++;
5672 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
5673 cookie = 1;
5674 no_out_cnt = 1;
5675 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) {
5676 /*
5677 * set hb flag since we can
5678 * use these for RTO
5679 */
5680 hbflag = 1;
5681 asconf = 1;
5682 }
5683 chk->sent = SCTP_DATAGRAM_SENT;
5684 chk->snd_count++;
5685 }
5686 if (mtu == 0) {
5687 /*
5688 * Ok we are out of room but we can
5689 * output without effecting the
5690 * flight size since this little guy
5691 * is a control only packet.
5692 */
5693 if (asconf) {
5694 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
5695 asconf = 0;
5696 }
5697 if (cookie) {
5698 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
5699 cookie = 0;
5700 }
5701 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
5702 if (outchain == NULL) {
5703 /* no memory */
5704 error = ENOBUFS;
5705 goto error_out_again;
5706 }
5707 shdr = mtod(outchain, struct sctphdr *);
5708 shdr->src_port = inp->sctp_lport;
5709 shdr->dest_port = stcb->rport;
5710 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
5711 shdr->checksum = 0;
5712 auth_offset += sizeof(struct sctphdr);
5713 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5714 (struct sockaddr *)&net->ro._l_addr,
5715 outchain, auth_offset, auth,
5716 no_fragmentflg, 0, NULL, asconf))) {
5717 if (error == ENOBUFS) {
5718 asoc->ifp_had_enobuf = 1;
5719 }
5720 SCTP_STAT_INCR(sctps_lowlevelerr);
5721 if (from_where == 0) {
5722 SCTP_STAT_INCR(sctps_lowlevelerrusr);
5723 }
5724 error_out_again:
5725 /* error, could not output */
5726 if (hbflag) {
5727 if (*now_filled == 0) {
5728 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5729 *now_filled = 1;
5730 *now = net->last_sent_time;
5731 } else {
5732 net->last_sent_time = *now;
5733 }
5734 hbflag = 0;
5735 }
5736 if (error == EHOSTUNREACH) {
5737 /*
5738 * Destination went
5739 * unreachable
5740 * during this send
5741 */
5742 sctp_move_to_an_alt(stcb, asoc, net);
5743 }
6732 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
6733 /*
6734 * if we have a route and an ifp check to see if we
6735 * have room to send to this guy
6736 */
6737 struct ifnet *ifp;
6738
6739 ifp = net->ro.ro_rt->rt_ifp;
6740 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
6741 SCTP_STAT_INCR(sctps_ifnomemqueued);
6742#ifdef SCTP_LOG_MAXBURST
6743 sctp_log_maxburst(stcb, net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
6744#endif
6745 continue;
6746 }
6747 }
6748 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
6749 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
6750 } else {
6751 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
6752 }
6753 mx_mtu = mtu;
6754 to_out = 0;
6755 if (mtu > asoc->peers_rwnd) {
6756 if (asoc->total_flight > 0) {
6757 /* We have a packet in flight somewhere */
6758 r_mtu = asoc->peers_rwnd;
6759 } else {
6760 /* We are always allowed to send one MTU out */
6761 one_chunk = 1;
6762 r_mtu = mtu;
6763 }
6764 } else {
6765 r_mtu = mtu;
6766 }
6767 /************************/
6768 /* Control transmission */
6769 /************************/
6770 /* Now first lets go through the control queue */
6771 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
6772 chk; chk = nchk) {
6773 nchk = TAILQ_NEXT(chk, sctp_next);
6774 if (chk->whoTo != net) {
6775 /*
6776 * No, not sent to the network we are
6777 * looking at
6778 */
6779 continue;
6780 }
6781 if (chk->data == NULL) {
6782 continue;
6783 }
6784 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
6785 /*
6786 * It must be unsent. Cookies and ASCONF's
6787 * hang around but there timers will force
6788 * when marked for resend.
6789 */
6790 continue;
6791 }
6792 /*
6793 * if no AUTH is yet included and this chunk
6794 * requires it, make sure to account for it. We
6795 * don't apply the size until the AUTH chunk is
6796 * actually added below in case there is no room for
6797 * this chunk. NOTE: we overload the use of "omtu"
6798 * here
6799 */
6800 if ((auth == NULL) &&
6801 sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
6802 stcb->asoc.peer_auth_chunks)) {
6803 omtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6804 } else
6805 omtu = 0;
6806 /* Here we do NOT factor the r_mtu */
6807 if ((chk->send_size < (int)(mtu - omtu)) ||
6808 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
6809 /*
6810 * We probably should glom the mbuf chain
6811 * from the chk->data for control but the
6812 * problem is it becomes yet one more level
6813 * of tracking to do if for some reason
6814 * output fails. Then I have got to
6815 * reconstruct the merged control chain.. el
6816 * yucko.. for now we take the easy way and
6817 * do the copy
6818 */
6819 /*
6820 * Add an AUTH chunk, if chunk requires it
6821 * save the offset into the chain for AUTH
6822 */
6823 if ((auth == NULL) &&
6824 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
6825 stcb->asoc.peer_auth_chunks))) {
6826 outchain = sctp_add_auth_chunk(outchain,
6827 &endoutchain,
6828 &auth,
6829 &auth_offset,
6830 stcb,
6831 chk->rec.chunk_id.id);
6832 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6833 }
6834 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain,
6835 (int)chk->rec.chunk_id.can_take_data,
6836 chk->send_size, chk->copy_by_ref);
6837 if (outchain == NULL) {
6838 *reason_code = 8;
6839 return (ENOMEM);
6840 }
6841 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
6842 /* update our MTU size */
6843 if (mtu > (chk->send_size + omtu))
6844 mtu -= (chk->send_size + omtu);
6845 else
6846 mtu = 0;
6847 to_out += (chk->send_size + omtu);
6848 /* Do clear IP_DF ? */
6849 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
6850 no_fragmentflg = 0;
6851 }
6852 if (chk->rec.chunk_id.can_take_data)
6853 chk->data = NULL;
6854 /* Mark things to be removed, if needed */
6855 if ((chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) ||
6856 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST) ||
6857 (chk->rec.chunk_id.id == SCTP_HEARTBEAT_ACK) ||
6858 (chk->rec.chunk_id.id == SCTP_SHUTDOWN) ||
6859 (chk->rec.chunk_id.id == SCTP_SHUTDOWN_ACK) ||
6860 (chk->rec.chunk_id.id == SCTP_OPERATION_ERROR) ||
6861 (chk->rec.chunk_id.id == SCTP_COOKIE_ACK) ||
6862 (chk->rec.chunk_id.id == SCTP_ECN_CWR) ||
6863 (chk->rec.chunk_id.id == SCTP_PACKET_DROPPED) ||
6864 (chk->rec.chunk_id.id == SCTP_ASCONF_ACK)) {
6865
6866 if (chk->rec.chunk_id.id == SCTP_HEARTBEAT_REQUEST)
6867 hbflag = 1;
6868 /* remove these chunks at the end */
6869 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
6870 /* turn off the timer */
6871 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
6872 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
6873 inp, stcb, net, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_1);
6874 }
6875 }
6876 ctl_cnt++;
6877 } else {
6878 /*
6879 * Other chunks, since they have
6880 * timers running (i.e. COOKIE or
6881 * ASCONF) we just "trust" that it
6882 * gets sent or retransmitted.
6883 */
6884 ctl_cnt++;
6885 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6886 cookie = 1;
6887 no_out_cnt = 1;
6888 } else if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6889 /*
6890 * set hb flag since we can
6891 * use these for RTO
6892 */
6893 hbflag = 1;
6894 asconf = 1;
6895 }
6896 chk->sent = SCTP_DATAGRAM_SENT;
6897 chk->snd_count++;
6898 }
6899 if (mtu == 0) {
6900 /*
6901 * Ok we are out of room but we can
6902 * output without effecting the
6903 * flight size since this little guy
6904 * is a control only packet.
6905 */
6906 if (asconf) {
6907 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
6908 asconf = 0;
6909 }
6910 if (cookie) {
6911 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
6912 cookie = 0;
6913 }
6914 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
6915 if (outchain == NULL) {
6916 /* no memory */
6917 error = ENOBUFS;
6918 goto error_out_again;
6919 }
6920 shdr = mtod(outchain, struct sctphdr *);
6921 shdr->src_port = inp->sctp_lport;
6922 shdr->dest_port = stcb->rport;
6923 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6924 shdr->checksum = 0;
6925 auth_offset += sizeof(struct sctphdr);
6926 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
6927 (struct sockaddr *)&net->ro._l_addr,
6928 outchain, auth_offset, auth,
6929 no_fragmentflg, 0, NULL, asconf))) {
6930 if (error == ENOBUFS) {
6931 asoc->ifp_had_enobuf = 1;
6932 }
6933 SCTP_STAT_INCR(sctps_lowlevelerr);
6934 if (from_where == 0) {
6935 SCTP_STAT_INCR(sctps_lowlevelerrusr);
6936 }
6937 error_out_again:
6938 /* error, could not output */
6939 if (hbflag) {
6940 if (*now_filled == 0) {
6941 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
6942 *now_filled = 1;
6943 *now = net->last_sent_time;
6944 } else {
6945 net->last_sent_time = *now;
6946 }
6947 hbflag = 0;
6948 }
6949 if (error == EHOSTUNREACH) {
6950 /*
6951 * Destination went
6952 * unreachable
6953 * during this send
6954 */
6955 sctp_move_to_an_alt(stcb, asoc, net);
6956 }
5744 sctp_clean_up_ctl(stcb, asoc);
5745 *reason_code = 7;
6957 *reason_code = 7;
5746 return (error);
6958 continue;
5747 } else
5748 asoc->ifp_had_enobuf = 0;
5749 /* Only HB or ASCONF advances time */
5750 if (hbflag) {
5751 if (*now_filled == 0) {
5752 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5753 *now_filled = 1;
5754 *now = net->last_sent_time;
5755 } else {
5756 net->last_sent_time = *now;
5757 }
5758 hbflag = 0;
5759 }
5760 /*
5761 * increase the number we sent, if a
5762 * cookie is sent we don't tell them
5763 * any was sent out.
5764 */
5765 outchain = endoutchain = NULL;
5766 auth = NULL;
5767 auth_offset = 0;
5768 if (!no_out_cnt)
5769 *num_out += ctl_cnt;
5770 /* recalc a clean slate and setup */
5771 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5772 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
5773 } else {
5774 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
5775 }
5776 to_out = 0;
5777 no_fragmentflg = 1;
5778 }
5779 }
5780 }
5781 /*********************/
5782 /* Data transmission */
5783 /*********************/
5784 /*
5785 * if AUTH for DATA is required and no AUTH has been added
5786 * yet, account for this in the mtu now... if no data can be
5787 * bundled, this adjustment won't matter anyways since the
5788 * packet will be going out...
5789 */
5790 if ((auth == NULL) &&
5791 sctp_auth_is_required_chunk(SCTP_DATA,
5792 stcb->asoc.peer_auth_chunks)) {
5793 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
5794 }
5795 /* now lets add any data within the MTU constraints */
5796 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
5797 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
5798 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
5799 else
5800 omtu = 0;
5801 } else {
5802 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
5803 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
5804 else
5805 omtu = 0;
5806 }
6959 } else
6960 asoc->ifp_had_enobuf = 0;
6961 /* Only HB or ASCONF advances time */
6962 if (hbflag) {
6963 if (*now_filled == 0) {
6964 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
6965 *now_filled = 1;
6966 *now = net->last_sent_time;
6967 } else {
6968 net->last_sent_time = *now;
6969 }
6970 hbflag = 0;
6971 }
6972 /*
6973 * increase the number we sent, if a
6974 * cookie is sent we don't tell them
6975 * any was sent out.
6976 */
6977 outchain = endoutchain = NULL;
6978 auth = NULL;
6979 auth_offset = 0;
6980 if (!no_out_cnt)
6981 *num_out += ctl_cnt;
6982 /* recalc a clean slate and setup */
6983 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6984 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
6985 } else {
6986 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
6987 }
6988 to_out = 0;
6989 no_fragmentflg = 1;
6990 }
6991 }
6992 }
6993 /*********************/
6994 /* Data transmission */
6995 /*********************/
6996 /*
6997 * if AUTH for DATA is required and no AUTH has been added
6998 * yet, account for this in the mtu now... if no data can be
6999 * bundled, this adjustment won't matter anyways since the
7000 * packet will be going out...
7001 */
7002 if ((auth == NULL) &&
7003 sctp_auth_is_required_chunk(SCTP_DATA,
7004 stcb->asoc.peer_auth_chunks)) {
7005 mtu -= sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7006 }
7007 /* now lets add any data within the MTU constraints */
7008 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
7009 if (net->mtu > (sizeof(struct ip) + sizeof(struct sctphdr)))
7010 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
7011 else
7012 omtu = 0;
7013 } else {
7014 if (net->mtu > (sizeof(struct ip6_hdr) + sizeof(struct sctphdr)))
7015 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
7016 else
7017 omtu = 0;
7018 }
5807 if (((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) ||
7019 if ((((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) && (skip_data_for_this_net == 0)) ||
5808 (cookie)) {
5809 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
5810 if (no_data_chunks) {
5811 /* let only control go out */
5812 *reason_code = 1;
5813 break;
5814 }
5815 if (net->flight_size >= net->cwnd) {
5816 /* skip this net, no room for data */
5817 *reason_code = 2;
5818 break;
5819 }
5820 nchk = TAILQ_NEXT(chk, sctp_next);
5821 if (chk->whoTo != net) {
5822 /* No, not sent to this net */
5823 continue;
5824 }
5825 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
5826 /*
5827 * strange, we have a chunk that is
5828 * to bit for its destination and
5829 * yet no fragment ok flag.
5830 * Something went wrong when the
5831 * PMTU changed...we did not mark
5832 * this chunk for some reason?? I
5833 * will fix it here by letting IP
5834 * fragment it for now and printing
5835 * a warning. This really should not
5836 * happen ...
5837 */
5838#ifdef SCTP_DEBUG
5839 printf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
5840 chk->send_size, mtu);
5841#endif
5842 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
5843 }
5844 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
5845 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
5846 /* ok we will add this one */
5847
5848 /*
5849 * Add an AUTH chunk, if chunk
5850 * requires it, save the offset into
5851 * the chain for AUTH
5852 */
5853 if ((auth == NULL) &&
5854 (sctp_auth_is_required_chunk(SCTP_DATA,
5855 stcb->asoc.peer_auth_chunks))) {
5856
5857 outchain = sctp_add_auth_chunk(outchain,
5858 &endoutchain,
5859 &auth,
5860 &auth_offset,
5861 stcb,
5862 SCTP_DATA);
5863 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
5864 }
5865 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
5866 chk->send_size, chk->copy_by_ref);
5867 if (outchain == NULL) {
5868#ifdef SCTP_DEBUG
5869 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5870 printf("No memory?\n");
5871 }
5872#endif
5873 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5874 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
5875 }
5876 *reason_code = 3;
5877 return (ENOMEM);
5878 }
5879 /* upate our MTU size */
5880 /* Do clear IP_DF ? */
5881 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
5882 no_fragmentflg = 0;
5883 }
5884 /* unsigned subtraction of mtu */
5885 if (mtu > chk->send_size)
5886 mtu -= chk->send_size;
5887 else
5888 mtu = 0;
5889 /* unsigned subtraction of r_mtu */
5890 if (r_mtu > chk->send_size)
5891 r_mtu -= chk->send_size;
5892 else
5893 r_mtu = 0;
5894
5895 to_out += chk->send_size;
5896 if (to_out > mx_mtu) {
5897#ifdef INVARIANTS
5898 panic("gag");
5899#else
5900 printf("Exceeding mtu of %d out size is %d\n",
5901 mx_mtu, to_out);
5902#endif
5903 }
5904 data_list[bundle_at++] = chk;
5905 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
5906 mtu = 0;
5907 break;
5908 }
5909 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
5910 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
5911 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
5912 } else {
5913 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
5914 }
5915 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
5916 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
5917 /*
5918 * Count number of
5919 * user msg's that
5920 * were fragmented
5921 * we do this by
5922 * counting when we
5923 * see a LAST
5924 * fragment only.
5925 */
5926 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
5927 }
5928 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
5929 break;
5930 }
5931 } else {
5932 /*
5933 * Must be sent in order of the
5934 * TSN's (on a network)
5935 */
5936 break;
5937 }
5938 } /* for () */
5939 } /* if asoc.state OPEN */
5940 /* Is there something to send for this destination? */
5941 if (outchain) {
5942 /* We may need to start a control timer or two */
5943 if (asconf) {
5944 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
5945 asconf = 0;
5946 }
5947 if (cookie) {
5948 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
5949 cookie = 0;
5950 }
5951 /* must start a send timer if data is being sent */
5952 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
5953 /*
5954 * no timer running on this destination
5955 * restart it.
5956 */
5957 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
5958 }
5959 /* Now send it, if there is anything to send :> */
5960 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
5961 if (outchain == NULL) {
5962 /* out of mbufs */
5963 error = ENOBUFS;
5964 goto errored_send;
5965 }
5966 shdr = mtod(outchain, struct sctphdr *);
5967 shdr->src_port = inp->sctp_lport;
5968 shdr->dest_port = stcb->rport;
5969 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
5970 shdr->checksum = 0;
5971 auth_offset += sizeof(struct sctphdr);
5972 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5973 (struct sockaddr *)&net->ro._l_addr,
5974 outchain,
5975 auth_offset,
5976 auth,
5977 no_fragmentflg,
5978 bundle_at,
5979 data_list[0],
5980 asconf))) {
5981 /* error, we could not output */
5982 if (error == ENOBUFS) {
5983 asoc->ifp_had_enobuf = 1;
5984 }
5985 SCTP_STAT_INCR(sctps_lowlevelerr);
5986 if (from_where == 0) {
5987 SCTP_STAT_INCR(sctps_lowlevelerrusr);
5988 }
5989 errored_send:
5990#ifdef SCTP_DEBUG
5991 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5992 printf("Gak send error %d\n", error);
5993 }
5994#endif
5995 if (hbflag) {
5996 if (*now_filled == 0) {
5997 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5998 *now_filled = 1;
5999 *now = net->last_sent_time;
6000 } else {
6001 net->last_sent_time = *now;
6002 }
6003 hbflag = 0;
6004 }
6005 if (error == EHOSTUNREACH) {
6006 /*
6007 * Destination went unreachable
6008 * during this send
6009 */
6010 sctp_move_to_an_alt(stcb, asoc, net);
6011 }
7020 (cookie)) {
7021 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
7022 if (no_data_chunks) {
7023 /* let only control go out */
7024 *reason_code = 1;
7025 break;
7026 }
7027 if (net->flight_size >= net->cwnd) {
7028 /* skip this net, no room for data */
7029 *reason_code = 2;
7030 break;
7031 }
7032 nchk = TAILQ_NEXT(chk, sctp_next);
7033 if (chk->whoTo != net) {
7034 /* No, not sent to this net */
7035 continue;
7036 }
7037 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
7038 /*
7039 * strange, we have a chunk that is
7040 * to bit for its destination and
7041 * yet no fragment ok flag.
7042 * Something went wrong when the
7043 * PMTU changed...we did not mark
7044 * this chunk for some reason?? I
7045 * will fix it here by letting IP
7046 * fragment it for now and printing
7047 * a warning. This really should not
7048 * happen ...
7049 */
7050#ifdef SCTP_DEBUG
7051 printf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
7052 chk->send_size, mtu);
7053#endif
7054 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
7055 }
7056 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
7057 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
7058 /* ok we will add this one */
7059
7060 /*
7061 * Add an AUTH chunk, if chunk
7062 * requires it, save the offset into
7063 * the chain for AUTH
7064 */
7065 if ((auth == NULL) &&
7066 (sctp_auth_is_required_chunk(SCTP_DATA,
7067 stcb->asoc.peer_auth_chunks))) {
7068
7069 outchain = sctp_add_auth_chunk(outchain,
7070 &endoutchain,
7071 &auth,
7072 &auth_offset,
7073 stcb,
7074 SCTP_DATA);
7075 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7076 }
7077 outchain = sctp_copy_mbufchain(chk->data, outchain, &endoutchain, 0,
7078 chk->send_size, chk->copy_by_ref);
7079 if (outchain == NULL) {
7080#ifdef SCTP_DEBUG
7081 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7082 printf("No memory?\n");
7083 }
7084#endif
7085 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
7086 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
7087 }
7088 *reason_code = 3;
7089 return (ENOMEM);
7090 }
7091 /* upate our MTU size */
7092 /* Do clear IP_DF ? */
7093 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7094 no_fragmentflg = 0;
7095 }
7096 /* unsigned subtraction of mtu */
7097 if (mtu > chk->send_size)
7098 mtu -= chk->send_size;
7099 else
7100 mtu = 0;
7101 /* unsigned subtraction of r_mtu */
7102 if (r_mtu > chk->send_size)
7103 r_mtu -= chk->send_size;
7104 else
7105 r_mtu = 0;
7106
7107 to_out += chk->send_size;
7108 if (to_out > mx_mtu) {
7109#ifdef INVARIANTS
7110 panic("gag");
7111#else
7112 printf("Exceeding mtu of %d out size is %d\n",
7113 mx_mtu, to_out);
7114#endif
7115 }
7116 data_list[bundle_at++] = chk;
7117 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
7118 mtu = 0;
7119 break;
7120 }
7121 if (chk->sent == SCTP_DATAGRAM_UNSENT) {
7122 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
7123 SCTP_STAT_INCR_COUNTER64(sctps_outorderchunks);
7124 } else {
7125 SCTP_STAT_INCR_COUNTER64(sctps_outunorderchunks);
7126 }
7127 if (((chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) == SCTP_DATA_LAST_FRAG) &&
7128 ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0))
7129 /*
7130 * Count number of
7131 * user msg's that
7132 * were fragmented
7133 * we do this by
7134 * counting when we
7135 * see a LAST
7136 * fragment only.
7137 */
7138 SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
7139 }
7140 if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
7141 break;
7142 }
7143 } else {
7144 /*
7145 * Must be sent in order of the
7146 * TSN's (on a network)
7147 */
7148 break;
7149 }
7150 } /* for () */
7151 } /* if asoc.state OPEN */
7152 /* Is there something to send for this destination? */
7153 if (outchain) {
7154 /* We may need to start a control timer or two */
7155 if (asconf) {
7156 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
7157 asconf = 0;
7158 }
7159 if (cookie) {
7160 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
7161 cookie = 0;
7162 }
7163 /* must start a send timer if data is being sent */
7164 if (bundle_at && (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
7165 /*
7166 * no timer running on this destination
7167 * restart it.
7168 */
7169 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
7170 }
7171 /* Now send it, if there is anything to send :> */
7172 SCTP_BUF_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
7173 if (outchain == NULL) {
7174 /* out of mbufs */
7175 error = ENOBUFS;
7176 goto errored_send;
7177 }
7178 shdr = mtod(outchain, struct sctphdr *);
7179 shdr->src_port = inp->sctp_lport;
7180 shdr->dest_port = stcb->rport;
7181 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
7182 shdr->checksum = 0;
7183 auth_offset += sizeof(struct sctphdr);
7184 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
7185 (struct sockaddr *)&net->ro._l_addr,
7186 outchain,
7187 auth_offset,
7188 auth,
7189 no_fragmentflg,
7190 bundle_at,
7191 data_list[0],
7192 asconf))) {
7193 /* error, we could not output */
7194 if (error == ENOBUFS) {
7195 asoc->ifp_had_enobuf = 1;
7196 }
7197 SCTP_STAT_INCR(sctps_lowlevelerr);
7198 if (from_where == 0) {
7199 SCTP_STAT_INCR(sctps_lowlevelerrusr);
7200 }
7201 errored_send:
7202#ifdef SCTP_DEBUG
7203 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7204 printf("Gak send error %d\n", error);
7205 }
7206#endif
7207 if (hbflag) {
7208 if (*now_filled == 0) {
7209 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7210 *now_filled = 1;
7211 *now = net->last_sent_time;
7212 } else {
7213 net->last_sent_time = *now;
7214 }
7215 hbflag = 0;
7216 }
7217 if (error == EHOSTUNREACH) {
7218 /*
7219 * Destination went unreachable
7220 * during this send
7221 */
7222 sctp_move_to_an_alt(stcb, asoc, net);
7223 }
6012 sctp_clean_up_ctl(stcb, asoc);
6013 *reason_code = 6;
7224 *reason_code = 6;
6014 return (error);
7225 continue;
6015 } else {
6016 asoc->ifp_had_enobuf = 0;
6017 }
6018 outchain = endoutchain = NULL;
6019 auth = NULL;
6020 auth_offset = 0;
6021 if (bundle_at || hbflag) {
6022 /* For data/asconf and hb set time */
6023 if (*now_filled == 0) {
6024 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
6025 *now_filled = 1;
6026 *now = net->last_sent_time;
6027 } else {
6028 net->last_sent_time = *now;
6029 }
6030 }
6031 if (!no_out_cnt) {
6032 *num_out += (ctl_cnt + bundle_at);
6033 }
6034 if (bundle_at) {
6035 /* if (!net->rto_pending) { */
6036 /* setup for a RTO measurement */
6037 /* net->rto_pending = 1; */
6038 tsns_sent = data_list[0]->rec.data.TSN_seq;
6039
6040 data_list[0]->do_rtt = 1;
6041 /* } else { */
6042 /* data_list[0]->do_rtt = 0; */
6043 /* } */
6044 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
6045 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
6046 if (sctp_early_fr) {
6047 if (net->flight_size < net->cwnd) {
6048 /* start or restart it */
6049 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6050 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
6051 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
6052 }
6053 SCTP_STAT_INCR(sctps_earlyfrstrout);
6054 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
6055 } else {
6056 /* stop it if its running */
6057 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
6058 SCTP_STAT_INCR(sctps_earlyfrstpout);
6059 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
6060 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
6061 }
6062 }
6063 }
6064 }
6065 if (one_chunk) {
6066 break;
6067 }
6068 }
6069#ifdef SCTP_CWND_LOGGING
6070 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
6071#endif
6072 }
6073 if (old_startat == NULL) {
6074 old_startat = send_start_at;
6075 send_start_at = TAILQ_FIRST(&asoc->nets);
6076 goto again_one_more_time;
6077 }
6078 /*
6079 * At the end there should be no NON timed chunks hanging on this
6080 * queue.
6081 */
6082#ifdef SCTP_CWND_LOGGING
6083 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
6084#endif
6085 if ((*num_out == 0) && (*reason_code == 0)) {
6086 *reason_code = 4;
6087 } else {
6088 *reason_code = 5;
6089 }
6090 sctp_clean_up_ctl(stcb, asoc);
6091 return (0);
6092}
6093
6094void
6095sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
6096{
6097 /*
6098 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
6099 * the control chunk queue.
6100 */
6101 struct sctp_chunkhdr *hdr;
6102 struct sctp_tmit_chunk *chk;
6103 struct mbuf *mat;
6104
6105 SCTP_TCB_LOCK_ASSERT(stcb);
6106 sctp_alloc_a_chunk(stcb, chk);
6107 if (chk == NULL) {
6108 /* no memory */
6109 sctp_m_freem(op_err);
6110 return;
6111 }
6112 chk->copy_by_ref = 0;
6113 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
6114 if (op_err == NULL) {
6115 sctp_free_a_chunk(stcb, chk);
6116 return;
6117 }
6118 chk->send_size = 0;
6119 mat = op_err;
6120 while (mat != NULL) {
6121 chk->send_size += SCTP_BUF_LEN(mat);
6122 mat = SCTP_BUF_NEXT(mat);
6123 }
6124 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
6125 chk->rec.chunk_id.can_take_data = 1;
6126 chk->sent = SCTP_DATAGRAM_UNSENT;
6127 chk->snd_count = 0;
6128 chk->flags = 0;
6129 chk->asoc = &stcb->asoc;
6130 chk->data = op_err;
6131 chk->whoTo = chk->asoc->primary_destination;
6132 atomic_add_int(&chk->whoTo->ref_count, 1);
6133 hdr = mtod(op_err, struct sctp_chunkhdr *);
6134 hdr->chunk_type = SCTP_OPERATION_ERROR;
6135 hdr->chunk_flags = 0;
6136 hdr->chunk_length = htons(chk->send_size);
6137 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
6138 chk,
6139 sctp_next);
6140 chk->asoc->ctrl_queue_cnt++;
6141}
6142
int
sctp_send_cookie_echo(struct mbuf *m,
    int offset,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * pull out the cookie and put it at the front of the control chunk
	 * queue.
	 *
	 * Scans the parameter area of a received INIT-ACK (mbuf chain 'm',
	 * chunk starting at 'offset') for the STATE-COOKIE parameter,
	 * copies it, rewrites the parameter header into a COOKIE-ECHO
	 * chunk header, and queues it at the FRONT of the control send
	 * queue toward the primary destination.
	 *
	 * Returns: 0 on success, -2 if the cookie copy fails for lack of
	 * memory, -3 if no STATE-COOKIE parameter is found, -5 if no chunk
	 * structure can be allocated.
	 * NOTE(review): 'net' is not used by this function.
	 */
	int at;
	struct mbuf *cookie;
	struct sctp_paramhdr parm, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	/* First find the cookie in the param area */
	cookie = NULL;
	/* parameters begin right after the fixed INIT chunk header */
	at = offset + sizeof(struct sctp_init_chunk);

	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
		if (phdr == NULL) {
			/* ran past the end of the chunk without a cookie */
			return (-3);
		}
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			int pad;

			/* found the cookie */
			/* round the copy length up to a 4-byte boundary */
			if ((pad = (plen % 4))) {
				plen += 4 - pad;
			}
			cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
			if (cookie == NULL) {
				/* No memory */
				return (-2);
			}
			break;
		}
		at += SCTP_SIZE32(plen);
	} while (phdr);
	if (cookie == NULL) {
		/* Did not find the cookie */
		return (-3);
	}
	/* ok, we got the cookie lets change it into a cookie echo chunk */

	/* first the change from param to cookie */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie);
		return (-5);
	}
	chk->copy_by_ref = 0;
	/* padded length computed above */
	chk->send_size = plen;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie;
	/* cookie echo always goes out the primary path */
	chk->whoTo = chk->asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}
6220
6221void
6222sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
6223 struct mbuf *m,
6224 int offset,
6225 int chk_length,
6226 struct sctp_nets *net)
6227{
6228 /*
6229 * take a HB request and make it into a HB ack and send it.
6230 */
6231 struct mbuf *outchain;
6232 struct sctp_chunkhdr *chdr;
6233 struct sctp_tmit_chunk *chk;
6234
6235
6236 if (net == NULL)
6237 /* must have a net pointer */
6238 return;
6239
6240 outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
6241 if (outchain == NULL) {
6242 /* gak out of memory */
6243 return;
6244 }
6245 chdr = mtod(outchain, struct sctp_chunkhdr *);
6246 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
6247 chdr->chunk_flags = 0;
6248 if (chk_length % 4) {
6249 /* need pad */
6250 uint32_t cpthis = 0;
6251 int padlen;
6252
6253 padlen = 4 - (chk_length % 4);
6254 m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
6255 }
6256 sctp_alloc_a_chunk(stcb, chk);
6257 if (chk == NULL) {
6258 /* no memory */
6259 sctp_m_freem(outchain);
6260 return;
6261 }
6262 chk->copy_by_ref = 0;
6263 chk->send_size = chk_length;
6264 chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
6265 chk->rec.chunk_id.can_take_data = 1;
6266 chk->sent = SCTP_DATAGRAM_UNSENT;
6267 chk->snd_count = 0;
6268 chk->flags = 0;
6269 chk->asoc = &stcb->asoc;
6270 chk->data = outchain;
6271 chk->whoTo = net;
6272 atomic_add_int(&chk->whoTo->ref_count, 1);
6273 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6274 chk->asoc->ctrl_queue_cnt++;
6275}
6276
6277int
6278sctp_send_cookie_ack(struct sctp_tcb *stcb)
6279{
6280 /* formulate and queue a cookie-ack back to sender */
6281 struct mbuf *cookie_ack;
6282 struct sctp_chunkhdr *hdr;
6283 struct sctp_tmit_chunk *chk;
6284
6285 cookie_ack = NULL;
6286 SCTP_TCB_LOCK_ASSERT(stcb);
6287
6288 cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
6289 if (cookie_ack == NULL) {
6290 /* no mbuf's */
6291 return (-1);
6292 }
6293 SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
6294 sctp_alloc_a_chunk(stcb, chk);
6295 if (chk == NULL) {
6296 /* no memory */
6297 sctp_m_freem(cookie_ack);
6298 return (-1);
6299 }
6300 chk->copy_by_ref = 0;
6301 chk->send_size = sizeof(struct sctp_chunkhdr);
6302 chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
6303 chk->rec.chunk_id.can_take_data = 1;
6304 chk->sent = SCTP_DATAGRAM_UNSENT;
6305 chk->snd_count = 0;
6306 chk->flags = 0;
6307 chk->asoc = &stcb->asoc;
6308 chk->data = cookie_ack;
6309 if (chk->asoc->last_control_chunk_from != NULL) {
6310 chk->whoTo = chk->asoc->last_control_chunk_from;
6311 } else {
6312 chk->whoTo = chk->asoc->primary_destination;
6313 }
6314 atomic_add_int(&chk->whoTo->ref_count, 1);
6315 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
6316 hdr->chunk_type = SCTP_COOKIE_ACK;
6317 hdr->chunk_flags = 0;
6318 hdr->chunk_length = htons(chk->send_size);
6319 SCTP_BUF_LEN(cookie_ack) = chk->send_size;
6320 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6321 chk->asoc->ctrl_queue_cnt++;
6322 return (0);
6323}
6324
6325
6326int
6327sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
6328{
6329 /* formulate and queue a SHUTDOWN-ACK back to the sender */
6330 struct mbuf *m_shutdown_ack;
6331 struct sctp_shutdown_ack_chunk *ack_cp;
6332 struct sctp_tmit_chunk *chk;
6333
6334 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
6335 if (m_shutdown_ack == NULL) {
6336 /* no mbuf's */
6337 return (-1);
6338 }
6339 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
6340 sctp_alloc_a_chunk(stcb, chk);
6341 if (chk == NULL) {
6342 /* no memory */
6343 sctp_m_freem(m_shutdown_ack);
6344 return (-1);
6345 }
6346 chk->copy_by_ref = 0;
6347
6348 chk->send_size = sizeof(struct sctp_chunkhdr);
6349 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
6350 chk->rec.chunk_id.can_take_data = 1;
6351 chk->sent = SCTP_DATAGRAM_UNSENT;
6352 chk->snd_count = 0;
6353 chk->flags = 0;
6354 chk->asoc = &stcb->asoc;
6355 chk->data = m_shutdown_ack;
6356 chk->whoTo = net;
6357 atomic_add_int(&net->ref_count, 1);
6358
6359 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
6360 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
6361 ack_cp->ch.chunk_flags = 0;
6362 ack_cp->ch.chunk_length = htons(chk->send_size);
6363 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
6364 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6365 chk->asoc->ctrl_queue_cnt++;
6366 return (0);
6367}
6368
6369int
6370sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
6371{
6372 /* formulate and queue a SHUTDOWN to the sender */
6373 struct mbuf *m_shutdown;
6374 struct sctp_shutdown_chunk *shutdown_cp;
6375 struct sctp_tmit_chunk *chk;
6376
6377 m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
6378 if (m_shutdown == NULL) {
6379 /* no mbuf's */
6380 return (-1);
6381 }
6382 SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
6383 sctp_alloc_a_chunk(stcb, chk);
6384 if (chk == NULL) {
6385 /* no memory */
6386 sctp_m_freem(m_shutdown);
6387 return (-1);
6388 }
6389 chk->copy_by_ref = 0;
6390 chk->send_size = sizeof(struct sctp_shutdown_chunk);
6391 chk->rec.chunk_id.id = SCTP_SHUTDOWN;
6392 chk->rec.chunk_id.can_take_data = 1;
6393 chk->sent = SCTP_DATAGRAM_UNSENT;
6394 chk->snd_count = 0;
6395 chk->flags = 0;
6396 chk->asoc = &stcb->asoc;
6397 chk->data = m_shutdown;
6398 chk->whoTo = net;
6399 atomic_add_int(&net->ref_count, 1);
6400
6401 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
6402 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
6403 shutdown_cp->ch.chunk_flags = 0;
6404 shutdown_cp->ch.chunk_length = htons(chk->send_size);
6405 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
6406 SCTP_BUF_LEN(m_shutdown) = chk->send_size;
6407 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6408 chk->asoc->ctrl_queue_cnt++;
6409 return (0);
6410}
6411
6412int
6413sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
6414{
6415 /*
6416 * formulate and queue an ASCONF to the peer ASCONF parameters
6417 * should be queued on the assoc queue
6418 */
6419 struct sctp_tmit_chunk *chk;
6420 struct mbuf *m_asconf;
6421 struct sctp_asconf_chunk *acp;
6422 int len;
6423
6424
6425 SCTP_TCB_LOCK_ASSERT(stcb);
6426 /* compose an ASCONF chunk, maximum length is PMTU */
6427 m_asconf = sctp_compose_asconf(stcb, &len);
6428 if (m_asconf == NULL) {
6429 return (-1);
6430 }
6431 acp = mtod(m_asconf, struct sctp_asconf_chunk *);
6432 sctp_alloc_a_chunk(stcb, chk);
6433 if (chk == NULL) {
6434 /* no memory */
6435 sctp_m_freem(m_asconf);
6436 return (-1);
6437 }
6438 chk->copy_by_ref = 0;
6439 chk->data = m_asconf;
6440 chk->send_size = len;
6441 chk->rec.chunk_id.id = SCTP_ASCONF;
6442 chk->rec.chunk_id.can_take_data = 0;
6443 chk->sent = SCTP_DATAGRAM_UNSENT;
6444 chk->snd_count = 0;
6445 chk->flags = 0;
6446 chk->asoc = &stcb->asoc;
6447 chk->whoTo = chk->asoc->primary_destination;
6448 atomic_add_int(&chk->whoTo->ref_count, 1);
6449 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6450 chk->asoc->ctrl_queue_cnt++;
6451 return (0);
6452}
6453
6454int
6455sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
6456{
6457 /*
6458 * formulate and queue a asconf-ack back to sender the asconf-ack
6459 * must be stored in the tcb
6460 */
6461 struct sctp_tmit_chunk *chk;
6462 struct mbuf *m_ack, *m;
6463
6464 SCTP_TCB_LOCK_ASSERT(stcb);
6465 /* is there a asconf-ack mbuf chain to send? */
6466 if (stcb->asoc.last_asconf_ack_sent == NULL) {
6467 return (-1);
6468 }
6469 /* copy the asconf_ack */
6470 m_ack = SCTP_M_COPYM(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL, M_DONTWAIT);
6471 if (m_ack == NULL) {
6472 /* couldn't copy it */
6473
6474 return (-1);
6475 }
6476 sctp_alloc_a_chunk(stcb, chk);
6477 if (chk == NULL) {
6478 /* no memory */
6479 if (m_ack)
6480 sctp_m_freem(m_ack);
6481 return (-1);
6482 }
6483 chk->copy_by_ref = 0;
6484 /* figure out where it goes to */
6485 if (retrans) {
6486 /* we're doing a retransmission */
6487 if (stcb->asoc.used_alt_asconfack > 2) {
6488 /* tried alternate nets already, go back */
6489 chk->whoTo = NULL;
6490 } else {
6491 /* need to try and alternate net */
6492 chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
6493 stcb->asoc.used_alt_asconfack++;
6494 }
6495 if (chk->whoTo == NULL) {
6496 /* no alternate */
6497 if (stcb->asoc.last_control_chunk_from == NULL)
6498 chk->whoTo = stcb->asoc.primary_destination;
6499 else
6500 chk->whoTo = stcb->asoc.last_control_chunk_from;
6501 stcb->asoc.used_alt_asconfack = 0;
6502 }
6503 } else {
6504 /* normal case */
6505 if (stcb->asoc.last_control_chunk_from == NULL)
6506 chk->whoTo = stcb->asoc.primary_destination;
6507 else
6508 chk->whoTo = stcb->asoc.last_control_chunk_from;
6509 stcb->asoc.used_alt_asconfack = 0;
6510 }
6511 chk->data = m_ack;
6512 chk->send_size = 0;
6513 /* Get size */
6514 m = m_ack;
6515 while (m) {
6516 chk->send_size += SCTP_BUF_LEN(m);
6517 m = SCTP_BUF_NEXT(m);
6518 }
6519 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
6520 chk->rec.chunk_id.can_take_data = 1;
6521 chk->sent = SCTP_DATAGRAM_UNSENT;
6522 chk->snd_count = 0;
6523 chk->flags = 0;
6524 chk->asoc = &stcb->asoc;
6525 atomic_add_int(&chk->whoTo->ref_count, 1);
6526 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6527 chk->asoc->ctrl_queue_cnt++;
6528 return (0);
6529}
6530
6531
6532static int
6533sctp_chunk_retransmission(struct sctp_inpcb *inp,
6534 struct sctp_tcb *stcb,
6535 struct sctp_association *asoc,
6536 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done)
6537{
6538 /*
6539 * send out one MTU of retransmission. If fast_retransmit is
6540 * happening we ignore the cwnd. Otherwise we obey the cwnd and
6541 * rwnd. For a Cookie or Asconf in the control chunk queue we
6542 * retransmit them by themselves.
6543 *
6544 * For data chunks we will pick out the lowest TSN's in the sent_queue
6545 * marked for resend and bundle them all together (up to a MTU of
6546 * destination). The address to send to should have been
6547 * selected/changed where the retransmission was marked (i.e. in FR
6548 * or t3-timeout routines).
6549 */
6550 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
6551 struct sctp_tmit_chunk *chk, *fwd;
6552 struct mbuf *m, *endofchain;
6553 struct sctphdr *shdr;
6554 int asconf;
6555 struct sctp_nets *net;
6556 uint32_t tsns_sent = 0;
6557 int no_fragmentflg, bundle_at, cnt_thru;
6558 unsigned int mtu;
6559 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
6560 struct sctp_auth_chunk *auth = NULL;
6561 uint32_t auth_offset = 0;
6562 uint32_t dmtu = 0;
6563
6564 SCTP_TCB_LOCK_ASSERT(stcb);
6565 tmr_started = ctl_cnt = bundle_at = error = 0;
6566 no_fragmentflg = 1;
6567 asconf = 0;
6568 fwd_tsn = 0;
6569 *cnt_out = 0;
6570 fwd = NULL;
6571 endofchain = m = NULL;
6572#ifdef SCTP_AUDITING_ENABLED
6573 sctp_audit_log(0xC3, 1);
6574#endif
6575 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6576#ifdef SCTP_DEBUG
6577 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
6578 printf("SCTP hits empty queue with cnt set to %d?\n",
6579 asoc->sent_queue_retran_cnt);
6580 }
6581#endif
6582 asoc->sent_queue_cnt = 0;
6583 asoc->sent_queue_cnt_removeable = 0;
6584 }
6585 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
6586 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
6587 (chk->rec.chunk_id.id == SCTP_ASCONF) ||
6588 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
6589 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
6590 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
6591 if (chk != asoc->str_reset) {
6592 /*
6593 * not eligible for retran if its
6594 * not ours
6595 */
6596 continue;
6597 }
6598 }
6599 ctl_cnt++;
6600 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
6601 no_fragmentflg = 1;
6602 asconf = 1;
6603 }
6604 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
6605 fwd_tsn = 1;
6606 fwd = chk;
6607 }
6608 /*
6609 * Add an AUTH chunk, if chunk requires it save the
6610 * offset into the chain for AUTH
6611 */
6612 if ((auth == NULL) &&
6613 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
6614 stcb->asoc.peer_auth_chunks))) {
6615 m = sctp_add_auth_chunk(m, &endofchain,
6616 &auth, &auth_offset,
6617 stcb,
6618 chk->rec.chunk_id.id);
6619 }
6620 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
6621 break;
6622 }
6623 }
6624 one_chunk = 0;
6625 cnt_thru = 0;
6626 /* do we have control chunks to retransmit? */
6627 if (m != NULL) {
6628 /* Start a timer no matter if we suceed or fail */
6629 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
6630 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
6631 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
6632 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
6633
6634 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
6635 if (m == NULL) {
6636 return (ENOBUFS);
6637 }
6638 shdr = mtod(m, struct sctphdr *);
6639 shdr->src_port = inp->sctp_lport;
6640 shdr->dest_port = stcb->rport;
6641 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6642 shdr->checksum = 0;
6643 auth_offset += sizeof(struct sctphdr);
6644 chk->snd_count++; /* update our count */
6645
6646 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
6647 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
6648 auth, no_fragmentflg, 0, NULL, asconf))) {
6649 SCTP_STAT_INCR(sctps_lowlevelerr);
6650 return (error);
6651 }
6652 m = endofchain = NULL;
6653 auth = NULL;
6654 auth_offset = 0;
6655 /*
6656 * We don't want to mark the net->sent time here since this
6657 * we use this for HB and retrans cannot measure RTT
6658 */
6659 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
6660 *cnt_out += 1;
6661 chk->sent = SCTP_DATAGRAM_SENT;
6662 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6663 if (fwd_tsn == 0) {
6664 return (0);
6665 } else {
6666 /* Clean up the fwd-tsn list */
6667 sctp_clean_up_ctl(stcb, asoc);
6668 return (0);
6669 }
6670 }
6671 /*
6672 * Ok, it is just data retransmission we need to do or that and a
6673 * fwd-tsn with it all.
6674 */
6675 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6676 return (-1);
6677 }
6678 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
6679 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
6680 /* not yet open, resend the cookie and that is it */
6681 return (1);
6682 }
6683#ifdef SCTP_AUDITING_ENABLED
6684 sctp_auditing(20, inp, stcb, NULL);
6685#endif
6686 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6687 if (chk->sent != SCTP_DATAGRAM_RESEND) {
6688 /* No, not sent to this net or not ready for rtx */
6689 continue;
6690
6691 }
6692 /* pick up the net */
6693 net = chk->whoTo;
6694 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6695 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
6696 } else {
6697 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
6698 }
6699
6700 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
6701 /* No room in peers rwnd */
6702 uint32_t tsn;
6703
6704 tsn = asoc->last_acked_seq + 1;
6705 if (tsn == chk->rec.data.TSN_seq) {
6706 /*
6707 * we make a special exception for this
6708 * case. The peer has no rwnd but is missing
6709 * the lowest chunk.. which is probably what
6710 * is holding up the rwnd.
6711 */
6712 goto one_chunk_around;
6713 }
6714 return (1);
6715 }
6716one_chunk_around:
6717 if (asoc->peers_rwnd < mtu) {
6718 one_chunk = 1;
6719 }
6720#ifdef SCTP_AUDITING_ENABLED
6721 sctp_audit_log(0xC3, 2);
6722#endif
6723 bundle_at = 0;
6724 m = NULL;
6725 net->fast_retran_ip = 0;
6726 if (chk->rec.data.doing_fast_retransmit == 0) {
6727 /*
6728 * if no FR in progress skip destination that have
6729 * flight_size > cwnd.
6730 */
6731 if (net->flight_size >= net->cwnd) {
6732 continue;
6733 }
6734 } else {
6735 /*
6736 * Mark the destination net to have FR recovery
6737 * limits put on it.
6738 */
6739 *fr_done = 1;
6740 net->fast_retran_ip = 1;
6741 }
6742
6743 /*
6744 * if no AUTH is yet included and this chunk requires it,
6745 * make sure to account for it. We don't apply the size
6746 * until the AUTH chunk is actually added below in case
6747 * there is no room for this chunk.
6748 */
6749 if ((auth == NULL) &&
6750 sctp_auth_is_required_chunk(SCTP_DATA,
6751 stcb->asoc.peer_auth_chunks)) {
6752 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6753 } else
6754 dmtu = 0;
6755
6756 if ((chk->send_size <= (mtu - dmtu)) ||
6757 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
6758 /* ok we will add this one */
6759 if ((auth == NULL) &&
6760 (sctp_auth_is_required_chunk(SCTP_DATA,
6761 stcb->asoc.peer_auth_chunks))) {
6762 m = sctp_add_auth_chunk(m, &endofchain,
6763 &auth, &auth_offset,
6764 stcb, SCTP_DATA);
6765 }
6766 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
6767 if (m == NULL) {
6768 return (ENOMEM);
6769 }
6770 /* Do clear IP_DF ? */
6771 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
6772 no_fragmentflg = 0;
6773 }
6774 /* upate our MTU size */
6775 if (mtu > (chk->send_size + dmtu))
6776 mtu -= (chk->send_size + dmtu);
6777 else
6778 mtu = 0;
6779 data_list[bundle_at++] = chk;
6780 if (one_chunk && (asoc->total_flight <= 0)) {
6781 SCTP_STAT_INCR(sctps_windowprobed);
6782 chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
6783 }
6784 }
6785 if (one_chunk == 0) {
6786 /*
6787 * now are there anymore forward from chk to pick
6788 * up?
6789 */
6790 fwd = TAILQ_NEXT(chk, sctp_next);
6791 while (fwd) {
6792 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
6793 /* Nope, not for retran */
6794 fwd = TAILQ_NEXT(fwd, sctp_next);
6795 continue;
6796 }
6797 if (fwd->whoTo != net) {
6798 /* Nope, not the net in question */
6799 fwd = TAILQ_NEXT(fwd, sctp_next);
6800 continue;
6801 }
6802 if ((auth == NULL) &&
6803 sctp_auth_is_required_chunk(SCTP_DATA,
6804 stcb->asoc.peer_auth_chunks)) {
6805 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
6806 } else
6807 dmtu = 0;
6808 if (fwd->send_size <= (mtu - dmtu)) {
6809 if ((auth == NULL) &&
6810 (sctp_auth_is_required_chunk(SCTP_DATA,
6811 stcb->asoc.peer_auth_chunks))) {
6812 m = sctp_add_auth_chunk(m,
6813 &endofchain,
6814 &auth, &auth_offset,
6815 stcb,
6816 SCTP_DATA);
6817 }
6818 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
6819 if (m == NULL) {
6820 return (ENOMEM);
6821 }
6822 /* Do clear IP_DF ? */
6823 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
6824 no_fragmentflg = 0;
6825 }
6826 /* upate our MTU size */
6827 if (mtu > (fwd->send_size + dmtu))
6828 mtu -= (fwd->send_size + dmtu);
6829 else
6830 mtu = 0;
6831 data_list[bundle_at++] = fwd;
6832 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
6833 break;
6834 }
6835 fwd = TAILQ_NEXT(fwd, sctp_next);
6836 } else {
6837 /* can't fit so we are done */
6838 break;
6839 }
6840 }
6841 }
6842 /* Is there something to send for this destination? */
6843 if (m) {
6844 /*
6845 * No matter if we fail/or suceed we should start a
6846 * timer. A failure is like a lost IP packet :-)
6847 */
6848 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
6849 /*
6850 * no timer running on this destination
6851 * restart it.
6852 */
6853 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6854 tmr_started = 1;
6855 }
6856 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
6857 if (m == NULL) {
6858 return (ENOBUFS);
6859 }
6860 shdr = mtod(m, struct sctphdr *);
6861 shdr->src_port = inp->sctp_lport;
6862 shdr->dest_port = stcb->rport;
6863 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6864 shdr->checksum = 0;
6865 auth_offset += sizeof(struct sctphdr);
6866 /* Now lets send it, if there is anything to send :> */
6867 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
6868 (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
6869 auth, no_fragmentflg, 0, NULL, asconf))) {
6870 /* error, we could not output */
6871 SCTP_STAT_INCR(sctps_lowlevelerr);
6872 return (error);
6873 }
6874 m = endofchain = NULL;
6875 auth = NULL;
6876 auth_offset = 0;
6877 /* For HB's */
6878 /*
6879 * We don't want to mark the net->sent time here
6880 * since this we use this for HB and retrans cannot
6881 * measure RTT
6882 */
6883 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
6884
6885 /* For auto-close */
6886 cnt_thru++;
6887 if (*now_filled == 0) {
6888 SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
6889 *now = asoc->time_last_sent;
6890 *now_filled = 1;
6891 } else {
6892 asoc->time_last_sent = *now;
6893 }
6894 *cnt_out += bundle_at;
6895#ifdef SCTP_AUDITING_ENABLED
6896 sctp_audit_log(0xC4, bundle_at);
6897#endif
6898 if (bundle_at) {
6899 tsns_sent = data_list[0]->rec.data.TSN_seq;
6900 }
6901 for (i = 0; i < bundle_at; i++) {
6902 SCTP_STAT_INCR(sctps_sendretransdata);
6903 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6904 /*
6905 * When we have a revoked data, and we
6906 * retransmit it, then we clear the revoked
6907 * flag since this flag dictates if we
6908 * subtracted from the fs
6909 */
7226 } else {
7227 asoc->ifp_had_enobuf = 0;
7228 }
7229 outchain = endoutchain = NULL;
7230 auth = NULL;
7231 auth_offset = 0;
7232 if (bundle_at || hbflag) {
7233 /* For data/asconf and hb set time */
7234 if (*now_filled == 0) {
7235 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
7236 *now_filled = 1;
7237 *now = net->last_sent_time;
7238 } else {
7239 net->last_sent_time = *now;
7240 }
7241 }
7242 if (!no_out_cnt) {
7243 *num_out += (ctl_cnt + bundle_at);
7244 }
7245 if (bundle_at) {
7246 /* if (!net->rto_pending) { */
7247 /* setup for a RTO measurement */
7248 /* net->rto_pending = 1; */
7249 tsns_sent = data_list[0]->rec.data.TSN_seq;
7250
7251 data_list[0]->do_rtt = 1;
7252 /* } else { */
7253 /* data_list[0]->do_rtt = 0; */
7254 /* } */
7255 SCTP_STAT_INCR_BY(sctps_senddata, bundle_at);
7256 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
7257 if (sctp_early_fr) {
7258 if (net->flight_size < net->cwnd) {
7259 /* start or restart it */
7260 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7261 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
7262 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_2);
7263 }
7264 SCTP_STAT_INCR(sctps_earlyfrstrout);
7265 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net);
7266 } else {
7267 /* stop it if its running */
7268 if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
7269 SCTP_STAT_INCR(sctps_earlyfrstpout);
7270 sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, inp, stcb, net,
7271 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_3);
7272 }
7273 }
7274 }
7275 }
7276 if (one_chunk) {
7277 break;
7278 }
7279 }
7280#ifdef SCTP_CWND_LOGGING
7281 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_SEND);
7282#endif
7283 }
7284 if (old_startat == NULL) {
7285 old_startat = send_start_at;
7286 send_start_at = TAILQ_FIRST(&asoc->nets);
7287 goto again_one_more_time;
7288 }
7289 /*
7290 * At the end there should be no NON timed chunks hanging on this
7291 * queue.
7292 */
7293#ifdef SCTP_CWND_LOGGING
7294 sctp_log_cwnd(stcb, net, *num_out, SCTP_CWND_LOG_FROM_SEND);
7295#endif
7296 if ((*num_out == 0) && (*reason_code == 0)) {
7297 *reason_code = 4;
7298 } else {
7299 *reason_code = 5;
7300 }
7301 sctp_clean_up_ctl(stcb, asoc);
7302 return (0);
7303}
7304
7305void
7306sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
7307{
7308 /*
7309 * Prepend a OPERATIONAL_ERROR chunk header and put on the end of
7310 * the control chunk queue.
7311 */
7312 struct sctp_chunkhdr *hdr;
7313 struct sctp_tmit_chunk *chk;
7314 struct mbuf *mat;
7315
7316 SCTP_TCB_LOCK_ASSERT(stcb);
7317 sctp_alloc_a_chunk(stcb, chk);
7318 if (chk == NULL) {
7319 /* no memory */
7320 sctp_m_freem(op_err);
7321 return;
7322 }
7323 chk->copy_by_ref = 0;
7324 SCTP_BUF_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
7325 if (op_err == NULL) {
7326 sctp_free_a_chunk(stcb, chk);
7327 return;
7328 }
7329 chk->send_size = 0;
7330 mat = op_err;
7331 while (mat != NULL) {
7332 chk->send_size += SCTP_BUF_LEN(mat);
7333 mat = SCTP_BUF_NEXT(mat);
7334 }
7335 chk->rec.chunk_id.id = SCTP_OPERATION_ERROR;
7336 chk->rec.chunk_id.can_take_data = 1;
7337 chk->sent = SCTP_DATAGRAM_UNSENT;
7338 chk->snd_count = 0;
7339 chk->flags = 0;
7340 chk->asoc = &stcb->asoc;
7341 chk->data = op_err;
7342 chk->whoTo = chk->asoc->primary_destination;
7343 atomic_add_int(&chk->whoTo->ref_count, 1);
7344 hdr = mtod(op_err, struct sctp_chunkhdr *);
7345 hdr->chunk_type = SCTP_OPERATION_ERROR;
7346 hdr->chunk_flags = 0;
7347 hdr->chunk_length = htons(chk->send_size);
7348 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
7349 chk,
7350 sctp_next);
7351 chk->asoc->ctrl_queue_cnt++;
7352}
7353
int
sctp_send_cookie_echo(struct mbuf *m,
    int offset,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * pull out the cookie and put it at the front of the control chunk
	 * queue.
	 *
	 * Walks the parameter area of a received INIT-ACK (chunk at
	 * 'offset' in mbuf chain 'm') for the STATE-COOKIE parameter,
	 * copies it, rewrites the parameter header into a COOKIE-ECHO
	 * chunk header, and queues it at the FRONT of the control send
	 * queue toward the primary destination.
	 *
	 * Returns: 0 on success, -2 on cookie-copy allocation failure,
	 * -3 when no cookie parameter is found, -5 when no chunk
	 * structure can be allocated.
	 * NOTE(review): 'net' is not used by this function.
	 */
	int at;
	struct mbuf *cookie;
	struct sctp_paramhdr parm, *phdr;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;
	uint16_t ptype, plen;

	/* First find the cookie in the param area */
	cookie = NULL;
	/* parameters begin right after the fixed INIT chunk header */
	at = offset + sizeof(struct sctp_init_chunk);

	SCTP_TCB_LOCK_ASSERT(stcb);
	do {
		phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
		if (phdr == NULL) {
			/* ran past the end of the chunk without a cookie */
			return (-3);
		}
		ptype = ntohs(phdr->param_type);
		plen = ntohs(phdr->param_length);
		if (ptype == SCTP_STATE_COOKIE) {
			int pad;

			/* found the cookie */
			/* round the copy length up to a 4-byte boundary */
			if ((pad = (plen % 4))) {
				plen += 4 - pad;
			}
			cookie = SCTP_M_COPYM(m, at, plen, M_DONTWAIT);
			if (cookie == NULL) {
				/* No memory */
				return (-2);
			}
			break;
		}
		at += SCTP_SIZE32(plen);
	} while (phdr);
	if (cookie == NULL) {
		/* Did not find the cookie */
		return (-3);
	}
	/* ok, we got the cookie lets change it into a cookie echo chunk */

	/* first the change from param to cookie */
	hdr = mtod(cookie, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ECHO;
	hdr->chunk_flags = 0;
	/* get the chunk stuff now and place it in the FRONT of the queue */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie);
		return (-5);
	}
	chk->copy_by_ref = 0;
	/* padded length computed above */
	chk->send_size = plen;
	chk->rec.chunk_id.id = SCTP_COOKIE_ECHO;
	chk->rec.chunk_id.can_take_data = 0;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie;
	/* cookie echo always goes out the primary path */
	chk->whoTo = chk->asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}
7431
void
sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
    struct mbuf *m,
    int offset,
    int chk_length,
    struct sctp_nets *net)
{
	/*
	 * take a HB request and make it into a HB ack and send it.
	 * The received heartbeat ('chk_length' bytes at 'offset' in 'm')
	 * is copied verbatim and only the chunk type is rewritten, so the
	 * peer gets its own heartbeat payload echoed back.
	 */
	struct mbuf *outchain;
	struct sctp_chunkhdr *chdr;
	struct sctp_tmit_chunk *chk;


	if (net == NULL)
		/* must have a net pointer */
		return;

	outchain = SCTP_M_COPYM(m, offset, chk_length, M_DONTWAIT);
	if (outchain == NULL) {
		/* gak out of memory */
		return;
	}
	/* flip the chunk type from HEARTBEAT to HEARTBEAT-ACK */
	chdr = mtod(outchain, struct sctp_chunkhdr *);
	chdr->chunk_type = SCTP_HEARTBEAT_ACK;
	chdr->chunk_flags = 0;
	if (chk_length % 4) {
		/* need pad: extend the copy to a 32-bit boundary with zeroes */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (chk_length % 4);
		m_copyback(outchain, chk_length, padlen, (caddr_t)&cpthis);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(outchain);
		return;
	}
	chk->copy_by_ref = 0;
	chk->send_size = chk_length;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = outchain;
	/* ack goes back on the net the request arrived on */
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
}
7487
int
sctp_send_cookie_ack(struct sctp_tcb *stcb)
{
	/*
	 * formulate and queue a cookie-ack back to sender.
	 * The ack is sent toward last_control_chunk_from (where the
	 * COOKIE-ECHO arrived) when known, otherwise the primary
	 * destination.  Returns 0 on success, -1 on allocation failure.
	 */
	struct mbuf *cookie_ack;
	struct sctp_chunkhdr *hdr;
	struct sctp_tmit_chunk *chk;

	cookie_ack = NULL;
	SCTP_TCB_LOCK_ASSERT(stcb);

	cookie_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_chunkhdr), 0, M_DONTWAIT, 1, MT_HEADER);
	if (cookie_ack == NULL) {
		/* no mbuf's */
		return (-1);
	}
	/* leave room for the headers that get prepended at send time */
	SCTP_BUF_RESV_UF(cookie_ack, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(cookie_ack);
		return (-1);
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->rec.chunk_id.id = SCTP_COOKIE_ACK;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = cookie_ack;
	/* reply toward where the COOKIE-ECHO came from, if known */
	if (chk->asoc->last_control_chunk_from != NULL) {
		chk->whoTo = chk->asoc->last_control_chunk_from;
	} else {
		chk->whoTo = chk->asoc->primary_destination;
	}
	atomic_add_int(&chk->whoTo->ref_count, 1);
	hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
	hdr->chunk_type = SCTP_COOKIE_ACK;
	hdr->chunk_flags = 0;
	hdr->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(cookie_ack) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}
7535
7536
7537int
7538sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
7539{
7540 /* formulate and queue a SHUTDOWN-ACK back to the sender */
7541 struct mbuf *m_shutdown_ack;
7542 struct sctp_shutdown_ack_chunk *ack_cp;
7543 struct sctp_tmit_chunk *chk;
7544
7545 m_shutdown_ack = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_ack_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
7546 if (m_shutdown_ack == NULL) {
7547 /* no mbuf's */
7548 return (-1);
7549 }
7550 SCTP_BUF_RESV_UF(m_shutdown_ack, SCTP_MIN_OVERHEAD);
7551 sctp_alloc_a_chunk(stcb, chk);
7552 if (chk == NULL) {
7553 /* no memory */
7554 sctp_m_freem(m_shutdown_ack);
7555 return (-1);
7556 }
7557 chk->copy_by_ref = 0;
7558
7559 chk->send_size = sizeof(struct sctp_chunkhdr);
7560 chk->rec.chunk_id.id = SCTP_SHUTDOWN_ACK;
7561 chk->rec.chunk_id.can_take_data = 1;
7562 chk->sent = SCTP_DATAGRAM_UNSENT;
7563 chk->snd_count = 0;
7564 chk->flags = 0;
7565 chk->asoc = &stcb->asoc;
7566 chk->data = m_shutdown_ack;
7567 chk->whoTo = net;
7568 atomic_add_int(&net->ref_count, 1);
7569
7570 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
7571 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
7572 ack_cp->ch.chunk_flags = 0;
7573 ack_cp->ch.chunk_length = htons(chk->send_size);
7574 SCTP_BUF_LEN(m_shutdown_ack) = chk->send_size;
7575 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
7576 chk->asoc->ctrl_queue_cnt++;
7577 return (0);
7578}
7579
int
sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * formulate and queue a SHUTDOWN to the sender on 'net',
	 * carrying our cumulative TSN ack.  Returns 0 on success,
	 * -1 on mbuf or chunk allocation failure.
	 */
	struct mbuf *m_shutdown;
	struct sctp_shutdown_chunk *shutdown_cp;
	struct sctp_tmit_chunk *chk;

	m_shutdown = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_shutdown == NULL) {
		/* no mbuf's */
		return (-1);
	}
	/* leave room for the headers that get prepended at send time */
	SCTP_BUF_RESV_UF(m_shutdown, SCTP_MIN_OVERHEAD);
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		/* no memory */
		sctp_m_freem(m_shutdown);
		return (-1);
	}
	chk->copy_by_ref = 0;
	chk->send_size = sizeof(struct sctp_shutdown_chunk);
	chk->rec.chunk_id.id = SCTP_SHUTDOWN;
	chk->rec.chunk_id.can_take_data = 1;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->flags = 0;
	chk->asoc = &stcb->asoc;
	chk->data = m_shutdown;
	chk->whoTo = net;
	atomic_add_int(&net->ref_count, 1);

	shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
	shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
	shutdown_cp->ch.chunk_flags = 0;
	shutdown_cp->ch.chunk_length = htons(chk->send_size);
	/* tell the peer how far we have cumulatively acked */
	shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
	SCTP_BUF_LEN(m_shutdown) = chk->send_size;
	TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
	chk->asoc->ctrl_queue_cnt++;
	return (0);
}
7622
7623int
7624sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
7625{
7626 /*
7627 * formulate and queue an ASCONF to the peer ASCONF parameters
7628 * should be queued on the assoc queue
7629 */
7630 struct sctp_tmit_chunk *chk;
7631 struct mbuf *m_asconf;
7632 struct sctp_asconf_chunk *acp;
7633 int len;
7634
7635
7636 SCTP_TCB_LOCK_ASSERT(stcb);
7637 /* compose an ASCONF chunk, maximum length is PMTU */
7638 m_asconf = sctp_compose_asconf(stcb, &len);
7639 if (m_asconf == NULL) {
7640 return (-1);
7641 }
7642 acp = mtod(m_asconf, struct sctp_asconf_chunk *);
7643 sctp_alloc_a_chunk(stcb, chk);
7644 if (chk == NULL) {
7645 /* no memory */
7646 sctp_m_freem(m_asconf);
7647 return (-1);
7648 }
7649 chk->copy_by_ref = 0;
7650 chk->data = m_asconf;
7651 chk->send_size = len;
7652 chk->rec.chunk_id.id = SCTP_ASCONF;
7653 chk->rec.chunk_id.can_take_data = 0;
7654 chk->sent = SCTP_DATAGRAM_UNSENT;
7655 chk->snd_count = 0;
7656 chk->flags = 0;
7657 chk->asoc = &stcb->asoc;
7658 chk->whoTo = chk->asoc->primary_destination;
7659 atomic_add_int(&chk->whoTo->ref_count, 1);
7660 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
7661 chk->asoc->ctrl_queue_cnt++;
7662 return (0);
7663}
7664
7665int
7666sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
7667{
7668 /*
7669 * formulate and queue a asconf-ack back to sender the asconf-ack
7670 * must be stored in the tcb
7671 */
7672 struct sctp_tmit_chunk *chk;
7673 struct mbuf *m_ack, *m;
7674
7675 SCTP_TCB_LOCK_ASSERT(stcb);
7676 /* is there a asconf-ack mbuf chain to send? */
7677 if (stcb->asoc.last_asconf_ack_sent == NULL) {
7678 return (-1);
7679 }
7680 /* copy the asconf_ack */
7681 m_ack = SCTP_M_COPYM(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL, M_DONTWAIT);
7682 if (m_ack == NULL) {
7683 /* couldn't copy it */
7684
7685 return (-1);
7686 }
7687 sctp_alloc_a_chunk(stcb, chk);
7688 if (chk == NULL) {
7689 /* no memory */
7690 if (m_ack)
7691 sctp_m_freem(m_ack);
7692 return (-1);
7693 }
7694 chk->copy_by_ref = 0;
7695 /* figure out where it goes to */
7696 if (retrans) {
7697 /* we're doing a retransmission */
7698 if (stcb->asoc.used_alt_asconfack > 2) {
7699 /* tried alternate nets already, go back */
7700 chk->whoTo = NULL;
7701 } else {
7702 /* need to try and alternate net */
7703 chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from, 0);
7704 stcb->asoc.used_alt_asconfack++;
7705 }
7706 if (chk->whoTo == NULL) {
7707 /* no alternate */
7708 if (stcb->asoc.last_control_chunk_from == NULL)
7709 chk->whoTo = stcb->asoc.primary_destination;
7710 else
7711 chk->whoTo = stcb->asoc.last_control_chunk_from;
7712 stcb->asoc.used_alt_asconfack = 0;
7713 }
7714 } else {
7715 /* normal case */
7716 if (stcb->asoc.last_control_chunk_from == NULL)
7717 chk->whoTo = stcb->asoc.primary_destination;
7718 else
7719 chk->whoTo = stcb->asoc.last_control_chunk_from;
7720 stcb->asoc.used_alt_asconfack = 0;
7721 }
7722 chk->data = m_ack;
7723 chk->send_size = 0;
7724 /* Get size */
7725 m = m_ack;
7726 while (m) {
7727 chk->send_size += SCTP_BUF_LEN(m);
7728 m = SCTP_BUF_NEXT(m);
7729 }
7730 chk->rec.chunk_id.id = SCTP_ASCONF_ACK;
7731 chk->rec.chunk_id.can_take_data = 1;
7732 chk->sent = SCTP_DATAGRAM_UNSENT;
7733 chk->snd_count = 0;
7734 chk->flags = 0;
7735 chk->asoc = &stcb->asoc;
7736 atomic_add_int(&chk->whoTo->ref_count, 1);
7737 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
7738 chk->asoc->ctrl_queue_cnt++;
7739 return (0);
7740}
7741
7742
7743static int
7744sctp_chunk_retransmission(struct sctp_inpcb *inp,
7745 struct sctp_tcb *stcb,
7746 struct sctp_association *asoc,
7747 int *cnt_out, struct timeval *now, int *now_filled, int *fr_done)
7748{
7749 /*
7750 * send out one MTU of retransmission. If fast_retransmit is
7751 * happening we ignore the cwnd. Otherwise we obey the cwnd and
7752 * rwnd. For a Cookie or Asconf in the control chunk queue we
7753 * retransmit them by themselves.
7754 *
7755 * For data chunks we will pick out the lowest TSN's in the sent_queue
7756 * marked for resend and bundle them all together (up to a MTU of
7757 * destination). The address to send to should have been
7758 * selected/changed where the retransmission was marked (i.e. in FR
7759 * or t3-timeout routines).
7760 */
7761 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
7762 struct sctp_tmit_chunk *chk, *fwd;
7763 struct mbuf *m, *endofchain;
7764 struct sctphdr *shdr;
7765 int asconf;
7766 struct sctp_nets *net;
7767 uint32_t tsns_sent = 0;
7768 int no_fragmentflg, bundle_at, cnt_thru;
7769 unsigned int mtu;
7770 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
7771 struct sctp_auth_chunk *auth = NULL;
7772 uint32_t auth_offset = 0;
7773 uint32_t dmtu = 0;
7774
7775 SCTP_TCB_LOCK_ASSERT(stcb);
7776 tmr_started = ctl_cnt = bundle_at = error = 0;
7777 no_fragmentflg = 1;
7778 asconf = 0;
7779 fwd_tsn = 0;
7780 *cnt_out = 0;
7781 fwd = NULL;
7782 endofchain = m = NULL;
7783#ifdef SCTP_AUDITING_ENABLED
7784 sctp_audit_log(0xC3, 1);
7785#endif
7786 if (TAILQ_EMPTY(&asoc->sent_queue)) {
7787#ifdef SCTP_DEBUG
7788 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7789 printf("SCTP hits empty queue with cnt set to %d?\n",
7790 asoc->sent_queue_retran_cnt);
7791 }
7792#endif
7793 asoc->sent_queue_cnt = 0;
7794 asoc->sent_queue_cnt_removeable = 0;
7795 }
7796 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7797 if ((chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) ||
7798 (chk->rec.chunk_id.id == SCTP_ASCONF) ||
7799 (chk->rec.chunk_id.id == SCTP_STREAM_RESET) ||
7800 (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)) {
7801 if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
7802 if (chk != asoc->str_reset) {
7803 /*
7804 * not eligible for retran if its
7805 * not ours
7806 */
7807 continue;
7808 }
7809 }
7810 ctl_cnt++;
7811 if (chk->rec.chunk_id.id == SCTP_ASCONF) {
7812 no_fragmentflg = 1;
7813 asconf = 1;
7814 }
7815 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7816 fwd_tsn = 1;
7817 fwd = chk;
7818 }
7819 /*
7820 * Add an AUTH chunk, if chunk requires it save the
7821 * offset into the chain for AUTH
7822 */
7823 if ((auth == NULL) &&
7824 (sctp_auth_is_required_chunk(chk->rec.chunk_id.id,
7825 stcb->asoc.peer_auth_chunks))) {
7826 m = sctp_add_auth_chunk(m, &endofchain,
7827 &auth, &auth_offset,
7828 stcb,
7829 chk->rec.chunk_id.id);
7830 }
7831 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
7832 break;
7833 }
7834 }
7835 one_chunk = 0;
7836 cnt_thru = 0;
7837 /* do we have control chunks to retransmit? */
7838 if (m != NULL) {
7839 /* Start a timer no matter if we suceed or fail */
7840 if (chk->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
7841 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
7842 } else if (chk->rec.chunk_id.id == SCTP_ASCONF)
7843 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
7844
7845 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
7846 if (m == NULL) {
7847 return (ENOBUFS);
7848 }
7849 shdr = mtod(m, struct sctphdr *);
7850 shdr->src_port = inp->sctp_lport;
7851 shdr->dest_port = stcb->rport;
7852 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
7853 shdr->checksum = 0;
7854 auth_offset += sizeof(struct sctphdr);
7855 chk->snd_count++; /* update our count */
7856
7857 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
7858 (struct sockaddr *)&chk->whoTo->ro._l_addr, m, auth_offset,
7859 auth, no_fragmentflg, 0, NULL, asconf))) {
7860 SCTP_STAT_INCR(sctps_lowlevelerr);
7861 return (error);
7862 }
7863 m = endofchain = NULL;
7864 auth = NULL;
7865 auth_offset = 0;
7866 /*
7867 * We don't want to mark the net->sent time here since this
7868 * we use this for HB and retrans cannot measure RTT
7869 */
7870 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time); */
7871 *cnt_out += 1;
7872 chk->sent = SCTP_DATAGRAM_SENT;
7873 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
7874 if (fwd_tsn == 0) {
7875 return (0);
7876 } else {
7877 /* Clean up the fwd-tsn list */
7878 sctp_clean_up_ctl(stcb, asoc);
7879 return (0);
7880 }
7881 }
7882 /*
7883 * Ok, it is just data retransmission we need to do or that and a
7884 * fwd-tsn with it all.
7885 */
7886 if (TAILQ_EMPTY(&asoc->sent_queue)) {
7887 return (-1);
7888 }
7889 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
7890 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
7891 /* not yet open, resend the cookie and that is it */
7892 return (1);
7893 }
7894#ifdef SCTP_AUDITING_ENABLED
7895 sctp_auditing(20, inp, stcb, NULL);
7896#endif
7897 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
7898 if (chk->sent != SCTP_DATAGRAM_RESEND) {
7899 /* No, not sent to this net or not ready for rtx */
7900 continue;
7901
7902 }
7903 /* pick up the net */
7904 net = chk->whoTo;
7905 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7906 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
7907 } else {
7908 mtu = net->mtu - SCTP_MIN_V4_OVERHEAD;
7909 }
7910
7911 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
7912 /* No room in peers rwnd */
7913 uint32_t tsn;
7914
7915 tsn = asoc->last_acked_seq + 1;
7916 if (tsn == chk->rec.data.TSN_seq) {
7917 /*
7918 * we make a special exception for this
7919 * case. The peer has no rwnd but is missing
7920 * the lowest chunk.. which is probably what
7921 * is holding up the rwnd.
7922 */
7923 goto one_chunk_around;
7924 }
7925 return (1);
7926 }
7927one_chunk_around:
7928 if (asoc->peers_rwnd < mtu) {
7929 one_chunk = 1;
7930 }
7931#ifdef SCTP_AUDITING_ENABLED
7932 sctp_audit_log(0xC3, 2);
7933#endif
7934 bundle_at = 0;
7935 m = NULL;
7936 net->fast_retran_ip = 0;
7937 if (chk->rec.data.doing_fast_retransmit == 0) {
7938 /*
7939 * if no FR in progress skip destination that have
7940 * flight_size > cwnd.
7941 */
7942 if (net->flight_size >= net->cwnd) {
7943 continue;
7944 }
7945 } else {
7946 /*
7947 * Mark the destination net to have FR recovery
7948 * limits put on it.
7949 */
7950 *fr_done = 1;
7951 net->fast_retran_ip = 1;
7952 }
7953
7954 /*
7955 * if no AUTH is yet included and this chunk requires it,
7956 * make sure to account for it. We don't apply the size
7957 * until the AUTH chunk is actually added below in case
7958 * there is no room for this chunk.
7959 */
7960 if ((auth == NULL) &&
7961 sctp_auth_is_required_chunk(SCTP_DATA,
7962 stcb->asoc.peer_auth_chunks)) {
7963 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
7964 } else
7965 dmtu = 0;
7966
7967 if ((chk->send_size <= (mtu - dmtu)) ||
7968 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
7969 /* ok we will add this one */
7970 if ((auth == NULL) &&
7971 (sctp_auth_is_required_chunk(SCTP_DATA,
7972 stcb->asoc.peer_auth_chunks))) {
7973 m = sctp_add_auth_chunk(m, &endofchain,
7974 &auth, &auth_offset,
7975 stcb, SCTP_DATA);
7976 }
7977 m = sctp_copy_mbufchain(chk->data, m, &endofchain, 0, chk->send_size, chk->copy_by_ref);
7978 if (m == NULL) {
7979 return (ENOMEM);
7980 }
7981 /* Do clear IP_DF ? */
7982 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
7983 no_fragmentflg = 0;
7984 }
7985 /* upate our MTU size */
7986 if (mtu > (chk->send_size + dmtu))
7987 mtu -= (chk->send_size + dmtu);
7988 else
7989 mtu = 0;
7990 data_list[bundle_at++] = chk;
7991 if (one_chunk && (asoc->total_flight <= 0)) {
7992 SCTP_STAT_INCR(sctps_windowprobed);
7993 chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
7994 }
7995 }
7996 if (one_chunk == 0) {
7997 /*
7998 * now are there anymore forward from chk to pick
7999 * up?
8000 */
8001 fwd = TAILQ_NEXT(chk, sctp_next);
8002 while (fwd) {
8003 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
8004 /* Nope, not for retran */
8005 fwd = TAILQ_NEXT(fwd, sctp_next);
8006 continue;
8007 }
8008 if (fwd->whoTo != net) {
8009 /* Nope, not the net in question */
8010 fwd = TAILQ_NEXT(fwd, sctp_next);
8011 continue;
8012 }
8013 if ((auth == NULL) &&
8014 sctp_auth_is_required_chunk(SCTP_DATA,
8015 stcb->asoc.peer_auth_chunks)) {
8016 dmtu = sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
8017 } else
8018 dmtu = 0;
8019 if (fwd->send_size <= (mtu - dmtu)) {
8020 if ((auth == NULL) &&
8021 (sctp_auth_is_required_chunk(SCTP_DATA,
8022 stcb->asoc.peer_auth_chunks))) {
8023 m = sctp_add_auth_chunk(m,
8024 &endofchain,
8025 &auth, &auth_offset,
8026 stcb,
8027 SCTP_DATA);
8028 }
8029 m = sctp_copy_mbufchain(fwd->data, m, &endofchain, 0, fwd->send_size, fwd->copy_by_ref);
8030 if (m == NULL) {
8031 return (ENOMEM);
8032 }
8033 /* Do clear IP_DF ? */
8034 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
8035 no_fragmentflg = 0;
8036 }
8037 /* upate our MTU size */
8038 if (mtu > (fwd->send_size + dmtu))
8039 mtu -= (fwd->send_size + dmtu);
8040 else
8041 mtu = 0;
8042 data_list[bundle_at++] = fwd;
8043 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
8044 break;
8045 }
8046 fwd = TAILQ_NEXT(fwd, sctp_next);
8047 } else {
8048 /* can't fit so we are done */
8049 break;
8050 }
8051 }
8052 }
8053 /* Is there something to send for this destination? */
8054 if (m) {
8055 /*
8056 * No matter if we fail/or suceed we should start a
8057 * timer. A failure is like a lost IP packet :-)
8058 */
8059 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8060 /*
8061 * no timer running on this destination
8062 * restart it.
8063 */
8064 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8065 tmr_started = 1;
8066 }
8067 SCTP_BUF_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
8068 if (m == NULL) {
8069 return (ENOBUFS);
8070 }
8071 shdr = mtod(m, struct sctphdr *);
8072 shdr->src_port = inp->sctp_lport;
8073 shdr->dest_port = stcb->rport;
8074 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
8075 shdr->checksum = 0;
8076 auth_offset += sizeof(struct sctphdr);
8077 /* Now lets send it, if there is anything to send :> */
8078 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
8079 (struct sockaddr *)&net->ro._l_addr, m, auth_offset,
8080 auth, no_fragmentflg, 0, NULL, asconf))) {
8081 /* error, we could not output */
8082 SCTP_STAT_INCR(sctps_lowlevelerr);
8083 return (error);
8084 }
8085 m = endofchain = NULL;
8086 auth = NULL;
8087 auth_offset = 0;
8088 /* For HB's */
8089 /*
8090 * We don't want to mark the net->sent time here
8091 * since this we use this for HB and retrans cannot
8092 * measure RTT
8093 */
8094 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time); */
8095
8096 /* For auto-close */
8097 cnt_thru++;
8098 if (*now_filled == 0) {
8099 SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
8100 *now = asoc->time_last_sent;
8101 *now_filled = 1;
8102 } else {
8103 asoc->time_last_sent = *now;
8104 }
8105 *cnt_out += bundle_at;
8106#ifdef SCTP_AUDITING_ENABLED
8107 sctp_audit_log(0xC4, bundle_at);
8108#endif
8109 if (bundle_at) {
8110 tsns_sent = data_list[0]->rec.data.TSN_seq;
8111 }
8112 for (i = 0; i < bundle_at; i++) {
8113 SCTP_STAT_INCR(sctps_sendretransdata);
8114 data_list[i]->sent = SCTP_DATAGRAM_SENT;
8115 /*
8116 * When we have a revoked data, and we
8117 * retransmit it, then we clear the revoked
8118 * flag since this flag dictates if we
8119 * subtracted from the fs
8120 */
6910 data_list[i]->rec.data.chunk_was_revoked = 0;
8121 if (data_list[i]->rec.data.chunk_was_revoked) {
8122 /* Deflate the cwnd */
8123 data_list[i]->whoTo->cwnd -= data_list[i]->book_size;
8124 data_list[i]->rec.data.chunk_was_revoked = 0;
8125 }
6911 data_list[i]->snd_count++;
6912 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
6913 /* record the time */
6914 data_list[i]->sent_rcv_time = asoc->time_last_sent;
6915 if (asoc->sent_queue_retran_cnt < 0) {
6916 asoc->sent_queue_retran_cnt = 0;
6917 }
6918 if (data_list[i]->book_size_scale) {
6919 /*
6920 * need to double the book size on
6921 * this one
6922 */
6923 data_list[i]->book_size_scale = 0;
6924 /*
6925 * Since we double the booksize, we
6926 * must also double the output queue
6927 * size, since this get shrunk when
6928 * we free by this amount.
6929 */
6930 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
6931 data_list[i]->book_size *= 2;
6932
6933
6934 } else {
6935 sctp_ucount_incr(asoc->total_flight_count);
6936#ifdef SCTP_LOG_RWND
6937 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6938 asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
6939#endif
6940 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6941 (uint32_t) (data_list[i]->send_size +
6942 sctp_peer_chunk_oh));
6943 }
6944#ifdef SCTP_FLIGHT_LOGGING
6945 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
6946 data_list[i]->whoTo->flight_size,
6947 data_list[i]->book_size,
6948 (uintptr_t) stcb,
6949 data_list[i]->rec.data.TSN_seq);
6950#endif
6951 net->flight_size += data_list[i]->book_size;
6952 asoc->total_flight += data_list[i]->book_size;
6953 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6954 /* SWS sender side engages */
6955 asoc->peers_rwnd = 0;
6956 }
6957 if ((i == 0) &&
6958 (data_list[i]->rec.data.doing_fast_retransmit)) {
6959 SCTP_STAT_INCR(sctps_sendfastretrans);
6960 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
6961 (tmr_started == 0)) {
6962 /*
6963 * ok we just fast-retrans'd
6964 * the lowest TSN, i.e the
6965 * first on the list. In
6966 * this case we want to give
6967 * some more time to get a
6968 * SACK back without a
6969 * t3-expiring.
6970 */
6971 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
6972 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
6973 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6974 }
6975 }
6976 }
6977#ifdef SCTP_CWND_LOGGING
6978 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
6979#endif
6980#ifdef SCTP_AUDITING_ENABLED
6981 sctp_auditing(21, inp, stcb, NULL);
6982#endif
6983 } else {
6984 /* None will fit */
6985 return (1);
6986 }
6987 if (asoc->sent_queue_retran_cnt <= 0) {
6988 /* all done we have no more to retran */
6989 asoc->sent_queue_retran_cnt = 0;
6990 break;
6991 }
6992 if (one_chunk) {
6993 /* No more room in rwnd */
6994 return (1);
6995 }
6996 /* stop the for loop here. we sent out a packet */
6997 break;
6998 }
6999 return (0);
7000}
7001
7002
7003static int
7004sctp_timer_validation(struct sctp_inpcb *inp,
7005 struct sctp_tcb *stcb,
7006 struct sctp_association *asoc,
7007 int ret)
7008{
7009 struct sctp_nets *net;
7010
7011 /* Validate that a timer is running somewhere */
7012 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7013 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
7014 /* Here is a timer */
7015 return (ret);
7016 }
7017 }
7018 SCTP_TCB_LOCK_ASSERT(stcb);
7019 /* Gak, we did not have a timer somewhere */
7020#ifdef SCTP_DEBUG
7021 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7022 printf("Deadlock avoided starting timer on a dest at retran\n");
7023 }
7024#endif
7025 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
7026 return (ret);
7027}
7028
int
sctp_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where)
{
	/*
	 * Ok this is the generic chunk service queue. we must do the
	 * following: - See if there are retransmits pending, if so we must
	 * do these first and return. - Service the stream queue that is
	 * next, moving any message (note I must get a complete message i.e.
	 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
	 * TSN's - Check to see if the cwnd/rwnd allows any output, if so we
	 * go ahead and fomulate and send the low level chunks. Making sure
	 * to combine any control in the control chunk queue also.
	 *
	 * 'from_where' identifies the caller (SCTP_OUTPUT_FROM_* values)
	 * and selects what kind of output is permitted: e.g. retransmits
	 * are suppressed for HB-timer callers, and only one transmission
	 * is allowed per T3 timeout.  Returns 0 on success or the errno
	 * from the medium-level output routine.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
	    burst_cnt = 0, burst_limit = 0;
	struct timeval now;
	int now_filled = 0;
	int cwnd_full = 0;
	int nagle_on = 0;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	int un_sent = 0;
	/* tot_frs counts fast-retransmit rounds, bounded by max_burst */
	int fr_done, tot_frs = 0;

	asoc = &stcb->asoc;
	/* Nagle only applies to output triggered by a user send */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
			nagle_on = 0;
		} else {
			nagle_on = 1;
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);

	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something to be sent left */
		return (error);
	}
	/*
	 * Do we have something to send, data or control AND a sack timer
	 * running, if so piggy-back the sack.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb);
		SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	}
	while (asoc->sent_queue_retran_cnt) {
		/*
		 * Ok, it is retransmission time only, we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*
			 * Special hook for handling cookiess discarded by
			 * peer that carried data. Send cookie-ack only and
			 * then the next call with get the retran's.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (0);
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if its not from a HB then do it */
			fr_done = 0;
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done);
			if (fr_done) {
				tot_frs++;
			}
		} else {
			/*
			 * its from any other place, we don't allow retran
			 * output (only control)
			 */
			ret = 1;
		}
		if (ret > 0) {
			/* Can't send anymore */
			/*
			 * now lets push out control by calling med-level
			 * output once. this assures that we WILL send HB's
			 * if queued too.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
#endif
			/* make sure some timer is still armed before leaving */
			return (sctp_timer_validation(inp, stcb, asoc, ret));
		}
		if (ret < 0) {
			/*
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
#endif
			break;
		}
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
#endif
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (ret);
		}
		if (tot_frs > asoc->max_burst) {
			/* Hit FR burst limit */
			return (0);
		}
		if ((num_out == 0) && (ret == 0)) {

			/* No more retrans to send */
			break;
		}
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
#endif
	/* Check for bad destinations, if they exist move chunks around. */
	burst_limit = asoc->max_burst;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
		    SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * if possible move things off of this address we
			 * still may send below due to the dormant state but
			 * we try to find an alternate address to send to
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_to_an_alt(stcb, asoc, net);
		} else {
			/*
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (sctp_use_cwnd_based_maxburst) {
				/* clamp cwnd so at most burst_limit MTUs can go out */
				if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
					int old_cwnd;

					if (net->ssthresh < net->cwnd)
						net->ssthresh = net->cwnd;
					old_cwnd = net->cwnd;
					net->cwnd = (net->flight_size + (burst_limit * net->mtu));

#ifdef SCTP_CWND_MONITOR
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
#endif

#ifdef SCTP_LOG_MAXBURST
					sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
#endif
					SCTP_STAT_INCR(sctps_maxburstqueued);
				}
				net->fast_retran_ip = 0;
			} else {
				if (net->flight_size == 0) {
					/* Should be decaying the cwnd here */
					;
				}
			}
		}

	}
	burst_cnt = 0;
	cwnd_full = 0;
	/* main transmit loop: keep calling the medium-level output until
	 * nothing goes out or a burst/nagle limit stops us */
	do {
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
		    &reason_code, 0, &cwnd_full, from_where,
		    &now, &now_filled, frag_point);
		if (error) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
				printf("Error %d was returned from med-c-op\n", error);
			}
#endif
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
#endif
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
			sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
#endif

			break;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			printf("m-c-o put out %d\n", num_out);
		}
#endif
		tot_out += num_out;
		burst_cnt++;
#ifdef SCTP_CWND_LOGGING
		sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
		if (num_out == 0) {
			sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
		}
#endif
		if (nagle_on) {
			/*
			 * When nagle is on, we look at how much is un_sent,
			 * then if its smaller than an MTU and we have data
			 * in flight we stop.
			 */
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
			    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
			    * sizeof(struct sctp_data_chunk)));
			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
			    (stcb->asoc.total_flight > 0)) {
				break;
			}
		}
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->out_wheel)) {
			/* Nothing left to send */
			break;
		}
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
			break;
		}
	} while (num_out && (sctp_use_cwnd_based_maxburst ||
	    (burst_cnt < burst_limit)));

	if (sctp_use_cwnd_based_maxburst == 0) {
		if (burst_cnt >= burst_limit) {
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
#endif
		} else {
			asoc->burst_limit_applied = 0;
		}
	}
#ifdef SCTP_CWND_LOGGING
	sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
		printf("Ok, we have put out %d chunks\n", tot_out);
	}
#endif
	/*
	 * Now we need to clean up the control chunk chain if a ECNE is on
	 * it. It must be marked as UNSENT again so next call will continue
	 * to send it until such time that we get a CWR, to remove it.
	 */
	if (stcb->asoc.ecn_echo_cnt_onq)
		sctp_fix_ecn_echo(asoc);
	return (error);
}
7297
7298
7299int
7300sctp_output(inp, m, addr, control, p, flags)
7301 struct sctp_inpcb *inp;
7302 struct mbuf *m;
7303 struct sockaddr *addr;
7304 struct mbuf *control;
7305
7306 struct thread *p;
7307 int flags;
7308{
7309 if (inp == NULL) {
7310 return (EINVAL);
7311 }
7312 if (inp->sctp_socket == NULL) {
7313 return (EINVAL);
7314 }
7315 return (sctp_sosend(inp->sctp_socket,
7316 addr,
7317 (struct uio *)NULL,
7318 m,
7319 control,
7320 flags,
7321 p));
7322}
7323
7324void
7325send_forward_tsn(struct sctp_tcb *stcb,
7326 struct sctp_association *asoc)
7327{
7328 struct sctp_tmit_chunk *chk;
7329 struct sctp_forward_tsn_chunk *fwdtsn;
7330
7331 SCTP_TCB_LOCK_ASSERT(stcb);
7332 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7333 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
7334 /* mark it to unsent */
7335 chk->sent = SCTP_DATAGRAM_UNSENT;
7336 chk->snd_count = 0;
7337 /* Do we correct its output location? */
7338 if (chk->whoTo != asoc->primary_destination) {
7339 sctp_free_remote_addr(chk->whoTo);
7340 chk->whoTo = asoc->primary_destination;
7341 atomic_add_int(&chk->whoTo->ref_count, 1);
7342 }
7343 goto sctp_fill_in_rest;
7344 }
7345 }
7346 /* Ok if we reach here we must build one */
7347 sctp_alloc_a_chunk(stcb, chk);
7348 if (chk == NULL) {
7349 return;
7350 }
7351 chk->copy_by_ref = 0;
7352 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
7353 chk->rec.chunk_id.can_take_data = 0;
7354 chk->asoc = asoc;
7355 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
7356 if (chk->data == NULL) {
7357 atomic_subtract_int(&chk->whoTo->ref_count, 1);
7358 sctp_free_a_chunk(stcb, chk);
7359 return;
7360 }
7361 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
7362 chk->sent = SCTP_DATAGRAM_UNSENT;
7363 chk->snd_count = 0;
7364 chk->whoTo = asoc->primary_destination;
7365 atomic_add_int(&chk->whoTo->ref_count, 1);
7366 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
7367 asoc->ctrl_queue_cnt++;
7368sctp_fill_in_rest:
7369 /*
7370 * Here we go through and fill out the part that deals with
7371 * stream/seq of the ones we skip.
7372 */
7373 SCTP_BUF_LEN(chk->data) = 0;
7374 {
7375 struct sctp_tmit_chunk *at, *tp1, *last;
7376 struct sctp_strseq *strseq;
7377 unsigned int cnt_of_space, i, ovh;
7378 unsigned int space_needed;
7379 unsigned int cnt_of_skipped = 0;
7380
7381 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
7382 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
7383 /* no more to look at */
7384 break;
7385 }
7386 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
7387 /* We don't report these */
7388 continue;
7389 }
7390 cnt_of_skipped++;
7391 }
7392 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
7393 (cnt_of_skipped * sizeof(struct sctp_strseq)));
7394
7395 cnt_of_space = M_TRAILINGSPACE(chk->data);
7396
7397 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7398 ovh = SCTP_MIN_OVERHEAD;
7399 } else {
7400 ovh = SCTP_MIN_V4_OVERHEAD;
7401 }
7402 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
7403 /* trim to a mtu size */
7404 cnt_of_space = asoc->smallest_mtu - ovh;
7405 }
7406 if (cnt_of_space < space_needed) {
7407 /*
7408 * ok we must trim down the chunk by lowering the
7409 * advance peer ack point.
7410 */
7411 cnt_of_skipped = (cnt_of_space -
7412 ((sizeof(struct sctp_forward_tsn_chunk)) /
7413 sizeof(struct sctp_strseq)));
7414 /*
7415 * Go through and find the TSN that will be the one
7416 * we report.
7417 */
7418 at = TAILQ_FIRST(&asoc->sent_queue);
7419 for (i = 0; i < cnt_of_skipped; i++) {
7420 tp1 = TAILQ_NEXT(at, sctp_next);
7421 at = tp1;
7422 }
7423 last = at;
7424 /*
7425 * last now points to last one I can report, update
7426 * peer ack point
7427 */
7428 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq;
7429 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq));
7430 }
7431 chk->send_size = space_needed;
7432 /* Setup the chunk */
7433 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
7434 fwdtsn->ch.chunk_length = htons(chk->send_size);
7435 fwdtsn->ch.chunk_flags = 0;
7436 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
7437 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point);
7438 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) +
7439 (cnt_of_skipped * sizeof(struct sctp_strseq)));
7440 SCTP_BUF_LEN(chk->data) = chk->send_size;
7441 fwdtsn++;
7442 /*
7443 * Move pointer to after the fwdtsn and transfer to the
7444 * strseq pointer.
7445 */
7446 strseq = (struct sctp_strseq *)fwdtsn;
7447 /*
7448 * Now populate the strseq list. This is done blindly
7449 * without pulling out duplicate stream info. This is
7450 * inefficent but won't harm the process since the peer will
7451 * look at these in sequence and will thus release anything.
7452 * It could mean we exceed the PMTU and chop off some that
7453 * we could have included.. but this is unlikely (aka 1432/4
7454 * would mean 300+ stream seq's would have to be reported in
7455 * one FWD-TSN. With a bit of work we can later FIX this to
7456 * optimize and pull out duplcates.. but it does add more
7457 * overhead. So for now... not!
7458 */
7459 at = TAILQ_FIRST(&asoc->sent_queue);
7460 for (i = 0; i < cnt_of_skipped; i++) {
7461 tp1 = TAILQ_NEXT(at, sctp_next);
7462 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
7463 /* We don't report these */
7464 i--;
7465 at = tp1;
7466 continue;
7467 }
7468 strseq->stream = ntohs(at->rec.data.stream_number);
7469 strseq->sequence = ntohs(at->rec.data.stream_seq);
7470 strseq++;
7471 at = tp1;
7472 }
7473 }
7474 return;
7475
7476}
7477
7478void
7479sctp_send_sack(struct sctp_tcb *stcb)
7480{
7481 /*
7482 * Queue up a SACK in the control queue. We must first check to see
7483 * if a SACK is somehow on the control queue. If so, we will take
7484 * and and remove the old one.
7485 */
7486 struct sctp_association *asoc;
7487 struct sctp_tmit_chunk *chk, *a_chk;
7488 struct sctp_sack_chunk *sack;
7489 struct sctp_gap_ack_block *gap_descriptor;
7490 struct sack_track *selector;
7491 int mergeable = 0;
7492 int offset;
7493 caddr_t limit;
7494 uint32_t *dup;
7495 int limit_reached = 0;
7496 unsigned int i, jstart, siz, j;
7497 unsigned int num_gap_blocks = 0, space;
7498 int num_dups = 0;
7499 int space_req;
7500
7501
7502 a_chk = NULL;
7503 asoc = &stcb->asoc;
7504 SCTP_TCB_LOCK_ASSERT(stcb);
7505 if (asoc->last_data_chunk_from == NULL) {
7506 /* Hmm we never received anything */
7507 return;
7508 }
7509 sctp_set_rwnd(stcb, asoc);
7510 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7511 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
7512 /* Hmm, found a sack already on queue, remove it */
7513 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7514 asoc->ctrl_queue_cnt++;
7515 a_chk = chk;
7516 if (a_chk->data) {
7517 sctp_m_freem(a_chk->data);
7518 a_chk->data = NULL;
7519 }
7520 sctp_free_remote_addr(a_chk->whoTo);
7521 a_chk->whoTo = NULL;
7522 break;
7523 }
7524 }
7525 if (a_chk == NULL) {
7526 sctp_alloc_a_chunk(stcb, a_chk);
7527 if (a_chk == NULL) {
7528 /* No memory so we drop the idea, and set a timer */
8126 data_list[i]->snd_count++;
8127 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
8128 /* record the time */
8129 data_list[i]->sent_rcv_time = asoc->time_last_sent;
8130 if (asoc->sent_queue_retran_cnt < 0) {
8131 asoc->sent_queue_retran_cnt = 0;
8132 }
8133 if (data_list[i]->book_size_scale) {
8134 /*
8135 * need to double the book size on
8136 * this one
8137 */
8138 data_list[i]->book_size_scale = 0;
8139 /*
8140 * Since we double the booksize, we
8141 * must also double the output queue
8142 * size, since this get shrunk when
8143 * we free by this amount.
8144 */
8145 atomic_add_int(&((asoc)->total_output_queue_size), data_list[i]->book_size);
8146 data_list[i]->book_size *= 2;
8147
8148
8149 } else {
8150 sctp_ucount_incr(asoc->total_flight_count);
8151#ifdef SCTP_LOG_RWND
8152 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
8153 asoc->peers_rwnd, data_list[i]->send_size, sctp_peer_chunk_oh);
8154#endif
8155 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
8156 (uint32_t) (data_list[i]->send_size +
8157 sctp_peer_chunk_oh));
8158 }
8159#ifdef SCTP_FLIGHT_LOGGING
8160 sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
8161 data_list[i]->whoTo->flight_size,
8162 data_list[i]->book_size,
8163 (uintptr_t) stcb,
8164 data_list[i]->rec.data.TSN_seq);
8165#endif
8166 net->flight_size += data_list[i]->book_size;
8167 asoc->total_flight += data_list[i]->book_size;
8168 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
8169 /* SWS sender side engages */
8170 asoc->peers_rwnd = 0;
8171 }
8172 if ((i == 0) &&
8173 (data_list[i]->rec.data.doing_fast_retransmit)) {
8174 SCTP_STAT_INCR(sctps_sendfastretrans);
8175 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
8176 (tmr_started == 0)) {
8177 /*
8178 * ok we just fast-retrans'd
8179 * the lowest TSN, i.e the
8180 * first on the list. In
8181 * this case we want to give
8182 * some more time to get a
8183 * SACK back without a
8184 * t3-expiring.
8185 */
8186 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net,
8187 SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_4);
8188 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
8189 }
8190 }
8191 }
8192#ifdef SCTP_CWND_LOGGING
8193 sctp_log_cwnd(stcb, net, tsns_sent, SCTP_CWND_LOG_FROM_RESEND);
8194#endif
8195#ifdef SCTP_AUDITING_ENABLED
8196 sctp_auditing(21, inp, stcb, NULL);
8197#endif
8198 } else {
8199 /* None will fit */
8200 return (1);
8201 }
8202 if (asoc->sent_queue_retran_cnt <= 0) {
8203 /* all done we have no more to retran */
8204 asoc->sent_queue_retran_cnt = 0;
8205 break;
8206 }
8207 if (one_chunk) {
8208 /* No more room in rwnd */
8209 return (1);
8210 }
8211 /* stop the for loop here. we sent out a packet */
8212 break;
8213 }
8214 return (0);
8215}
8216
8217
8218static int
8219sctp_timer_validation(struct sctp_inpcb *inp,
8220 struct sctp_tcb *stcb,
8221 struct sctp_association *asoc,
8222 int ret)
8223{
8224 struct sctp_nets *net;
8225
8226 /* Validate that a timer is running somewhere */
8227 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
8228 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
8229 /* Here is a timer */
8230 return (ret);
8231 }
8232 }
8233 SCTP_TCB_LOCK_ASSERT(stcb);
8234 /* Gak, we did not have a timer somewhere */
8235#ifdef SCTP_DEBUG
8236 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
8237 printf("Deadlock avoided starting timer on a dest at retran\n");
8238 }
8239#endif
8240 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
8241 return (ret);
8242}
8243
/*
 * Top-level output service routine for one association.
 *
 * Order of operations (visible below): piggy-back a pending SACK if the
 * delayed-ack timer is running, drain retransmissions one packet at a time,
 * rehome chunks away from unreachable destinations, then loop calling
 * sctp_med_chunk_output() until the queues empty, Nagle blocks us, or the
 * max-burst limit is reached.  Finally any queued ECN-ECHO chunks are
 * re-marked UNSENT so they keep being sent until a CWR arrives.
 *
 * 'from_where' identifies the caller (user send, T3 timer, cookie-ack,
 * HB timer, ...) and gates which of these phases may emit data.
 * Returns 0, or an errno-style error propagated from the med-level output.
 * Caller must hold the TCB lock (asserted below).
 */
int
sctp_chunk_output(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    int from_where)
{
	/*
	 * Ok this is the generic chunk service queue. we must do the
	 * following: - See if there are retransmits pending, if so we must
	 * do these first and return. - Service the stream queue that is
	 * next, moving any message (note I must get a complete message i.e.
	 * FIRST/MIDDLE and LAST to the out queue in one pass) and assigning
	 * TSN's - Check to see if the cwnd/rwnd allows any output, if so we
	 * go ahead and fomulate and send the low level chunks. Making sure
	 * to combine any control in the control chunk queue also.
	 */
	struct sctp_association *asoc;
	struct sctp_nets *net;
	int error = 0, num_out = 0, tot_out = 0, ret = 0, reason_code = 0,
	    burst_cnt = 0, burst_limit = 0;
	struct timeval now;
	int now_filled = 0;
	int cwnd_full = 0;
	int nagle_on = 0;
	int frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
	int un_sent = 0;
	int fr_done, tot_frs = 0;

	asoc = &stcb->asoc;
	/* Nagle only applies to data handed down by the user. */
	if (from_where == SCTP_OUTPUT_FROM_USR_SEND) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY)) {
			nagle_on = 0;
		} else {
			nagle_on = 1;
		}
	}
	SCTP_TCB_LOCK_ASSERT(stcb);

	/* Bytes queued but not yet in flight. */
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	if ((un_sent <= 0) &&
	    (TAILQ_EMPTY(&asoc->control_send_queue)) &&
	    (asoc->sent_queue_retran_cnt == 0)) {
		/* Nothing to do unless there is something to be sent left */
		return (error);
	}
	/*
	 * Do we have something to send, data or control AND a sack timer
	 * running, if so piggy-back the sack.
	 */
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		sctp_send_sack(stcb);
		SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
	}
	while (asoc->sent_queue_retran_cnt) {
		/*
		 * Ok, it is retransmission time only, we send out only ONE
		 * packet with a single call off to the retran code.
		 */
		if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
			/*
			 * Special hook for handling cookiess discarded by
			 * peer that carried data. Send cookie-ack only and
			 * then the next call with get the retran's.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (0);
		} else if (from_where != SCTP_OUTPUT_FROM_HB_TMR) {
			/* if its not from a HB then do it */
			fr_done = 0;
			ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled, &fr_done);
			if (fr_done) {
				tot_frs++;
			}
		} else {
			/*
			 * its from any other place, we don't allow retran
			 * output (only control)
			 */
			ret = 1;
		}
		if (ret > 0) {
			/* Can't send anymore */
			/*
			 * now lets push out control by calling med-level
			 * output once. this assures that we WILL send HB's
			 * if queued too.
			 */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
			    &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(8, inp, stcb, NULL);
#endif
			/* Make sure some timer is still armed before returning. */
			return (sctp_timer_validation(inp, stcb, asoc, ret));
		}
		if (ret < 0) {
			/*
			 * The count was off.. retran is not happening so do
			 * the normal retransmission.
			 */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(9, inp, stcb, NULL);
#endif
			break;
		}
		if (from_where == SCTP_OUTPUT_FROM_T3) {
			/* Only one transmission allowed out of a timeout */
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(10, inp, stcb, NULL);
#endif
			/* Push out any control */
			(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
			    &now, &now_filled, frag_point);
			return (ret);
		}
		if (tot_frs > asoc->max_burst) {
			/* Hit FR burst limit */
			return (0);
		}
		if ((num_out == 0) && (ret == 0)) {

			/* No more retrans to send */
			break;
		}
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_auditing(12, inp, stcb, NULL);
#endif
	/* Check for bad destinations, if they exist move chunks around. */
	burst_limit = asoc->max_burst;
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
		    SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * if possible move things off of this address we
			 * still may send below due to the dormant state but
			 * we try to find an alternate address to send to
			 * and if we have one we move all queued data on the
			 * out wheel to this alternate address.
			 */
			if (net->ref_count > 1)
				sctp_move_to_an_alt(stcb, asoc, net);
		} else {
			/*
			 * if ((asoc->sat_network) || (net->addr_is_local))
			 * { burst_limit = asoc->max_burst *
			 * SCTP_SAT_NETWORK_BURST_INCR; }
			 */
			if (sctp_use_cwnd_based_maxburst) {
				/*
				 * Clamp cwnd so that at most burst_limit
				 * MTUs beyond the current flight can go out
				 * in this service pass.
				 */
				if ((net->flight_size + (burst_limit * net->mtu)) < net->cwnd) {
					int old_cwnd;

					if (net->ssthresh < net->cwnd)
						net->ssthresh = net->cwnd;
					old_cwnd = net->cwnd;
					net->cwnd = (net->flight_size + (burst_limit * net->mtu));

#ifdef SCTP_CWND_MONITOR
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
#endif

#ifdef SCTP_LOG_MAXBURST
					sctp_log_maxburst(stcb, net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
#endif
					SCTP_STAT_INCR(sctps_maxburstqueued);
				}
				net->fast_retran_ip = 0;
			} else {
				if (net->flight_size == 0) {
					/* Should be decaying the cwnd here */
					;
				}
			}
		}

	}
	burst_cnt = 0;
	cwnd_full = 0;
	/* Main send loop: one med-level output pass per iteration. */
	do {
		error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
		    &reason_code, 0, &cwnd_full, from_where,
		    &now, &now_filled, frag_point);
		if (error) {
#ifdef SCTP_DEBUG
			if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
				printf("Error %d was returned from med-c-op\n", error);
			}
#endif
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, error, burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
#endif
#ifdef SCTP_CWND_LOGGING
			sctp_log_cwnd(stcb, NULL, error, SCTP_SEND_NOW_COMPLETES);
			sctp_log_cwnd(stcb, NULL, 0xdeadbeef, SCTP_SEND_NOW_COMPLETES);
#endif

			break;
		}
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
			printf("m-c-o put out %d\n", num_out);
		}
#endif
		tot_out += num_out;
		burst_cnt++;
#ifdef SCTP_CWND_LOGGING
		sctp_log_cwnd(stcb, NULL, num_out, SCTP_SEND_NOW_COMPLETES);
		if (num_out == 0) {
			sctp_log_cwnd(stcb, NULL, reason_code, SCTP_SEND_NOW_COMPLETES);
		}
#endif
		if (nagle_on) {
			/*
			 * When nagle is on, we look at how much is un_sent,
			 * then if its smaller than an MTU and we have data
			 * in flight we stop.
			 */
			un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
			    ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
			    * sizeof(struct sctp_data_chunk)));
			if ((un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD)) &&
			    (stcb->asoc.total_flight > 0)) {
				break;
			}
		}
		if (TAILQ_EMPTY(&asoc->control_send_queue) &&
		    TAILQ_EMPTY(&asoc->send_queue) &&
		    TAILQ_EMPTY(&asoc->out_wheel)) {
			/* Nothing left to send */
			break;
		}
		if ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) <= 0) {
			/* Nothing left to send */
			break;
		}
	} while (num_out && (sctp_use_cwnd_based_maxburst ||
	    (burst_cnt < burst_limit)));

	if (sctp_use_cwnd_based_maxburst == 0) {
		if (burst_cnt >= burst_limit) {
			SCTP_STAT_INCR(sctps_maxburstqueued);
			asoc->burst_limit_applied = 1;
#ifdef SCTP_LOG_MAXBURST
			sctp_log_maxburst(stcb, asoc->primary_destination, 0, burst_cnt, SCTP_MAX_BURST_APPLIED);
#endif
		} else {
			asoc->burst_limit_applied = 0;
		}
	}
#ifdef SCTP_CWND_LOGGING
	sctp_log_cwnd(stcb, NULL, tot_out, SCTP_SEND_NOW_COMPLETES);
#endif
#ifdef SCTP_DEBUG
	if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
		printf("Ok, we have put out %d chunks\n", tot_out);
	}
#endif
	/*
	 * Now we need to clean up the control chunk chain if a ECNE is on
	 * it. It must be marked as UNSENT again so next call will continue
	 * to send it until such time that we get a CWR, to remove it.
	 */
	if (stcb->asoc.ecn_echo_cnt_onq)
		sctp_fix_ecn_echo(asoc);
	return (error);
}
8512
8513
8514int
8515sctp_output(inp, m, addr, control, p, flags)
8516 struct sctp_inpcb *inp;
8517 struct mbuf *m;
8518 struct sockaddr *addr;
8519 struct mbuf *control;
8520
8521 struct thread *p;
8522 int flags;
8523{
8524 if (inp == NULL) {
8525 return (EINVAL);
8526 }
8527 if (inp->sctp_socket == NULL) {
8528 return (EINVAL);
8529 }
8530 return (sctp_sosend(inp->sctp_socket,
8531 addr,
8532 (struct uio *)NULL,
8533 m,
8534 control,
8535 flags,
8536 p));
8537}
8538
8539void
8540send_forward_tsn(struct sctp_tcb *stcb,
8541 struct sctp_association *asoc)
8542{
8543 struct sctp_tmit_chunk *chk;
8544 struct sctp_forward_tsn_chunk *fwdtsn;
8545
8546 SCTP_TCB_LOCK_ASSERT(stcb);
8547 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8548 if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN) {
8549 /* mark it to unsent */
8550 chk->sent = SCTP_DATAGRAM_UNSENT;
8551 chk->snd_count = 0;
8552 /* Do we correct its output location? */
8553 if (chk->whoTo != asoc->primary_destination) {
8554 sctp_free_remote_addr(chk->whoTo);
8555 chk->whoTo = asoc->primary_destination;
8556 atomic_add_int(&chk->whoTo->ref_count, 1);
8557 }
8558 goto sctp_fill_in_rest;
8559 }
8560 }
8561 /* Ok if we reach here we must build one */
8562 sctp_alloc_a_chunk(stcb, chk);
8563 if (chk == NULL) {
8564 return;
8565 }
8566 chk->copy_by_ref = 0;
8567 chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
8568 chk->rec.chunk_id.can_take_data = 0;
8569 chk->asoc = asoc;
8570 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
8571 if (chk->data == NULL) {
8572 atomic_subtract_int(&chk->whoTo->ref_count, 1);
8573 sctp_free_a_chunk(stcb, chk);
8574 return;
8575 }
8576 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
8577 chk->sent = SCTP_DATAGRAM_UNSENT;
8578 chk->snd_count = 0;
8579 chk->whoTo = asoc->primary_destination;
8580 atomic_add_int(&chk->whoTo->ref_count, 1);
8581 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
8582 asoc->ctrl_queue_cnt++;
8583sctp_fill_in_rest:
8584 /*
8585 * Here we go through and fill out the part that deals with
8586 * stream/seq of the ones we skip.
8587 */
8588 SCTP_BUF_LEN(chk->data) = 0;
8589 {
8590 struct sctp_tmit_chunk *at, *tp1, *last;
8591 struct sctp_strseq *strseq;
8592 unsigned int cnt_of_space, i, ovh;
8593 unsigned int space_needed;
8594 unsigned int cnt_of_skipped = 0;
8595
8596 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
8597 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
8598 /* no more to look at */
8599 break;
8600 }
8601 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
8602 /* We don't report these */
8603 continue;
8604 }
8605 cnt_of_skipped++;
8606 }
8607 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
8608 (cnt_of_skipped * sizeof(struct sctp_strseq)));
8609
8610 cnt_of_space = M_TRAILINGSPACE(chk->data);
8611
8612 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
8613 ovh = SCTP_MIN_OVERHEAD;
8614 } else {
8615 ovh = SCTP_MIN_V4_OVERHEAD;
8616 }
8617 if (cnt_of_space > (asoc->smallest_mtu - ovh)) {
8618 /* trim to a mtu size */
8619 cnt_of_space = asoc->smallest_mtu - ovh;
8620 }
8621 if (cnt_of_space < space_needed) {
8622 /*
8623 * ok we must trim down the chunk by lowering the
8624 * advance peer ack point.
8625 */
8626 cnt_of_skipped = (cnt_of_space -
8627 ((sizeof(struct sctp_forward_tsn_chunk)) /
8628 sizeof(struct sctp_strseq)));
8629 /*
8630 * Go through and find the TSN that will be the one
8631 * we report.
8632 */
8633 at = TAILQ_FIRST(&asoc->sent_queue);
8634 for (i = 0; i < cnt_of_skipped; i++) {
8635 tp1 = TAILQ_NEXT(at, sctp_next);
8636 at = tp1;
8637 }
8638 last = at;
8639 /*
8640 * last now points to last one I can report, update
8641 * peer ack point
8642 */
8643 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq;
8644 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq));
8645 }
8646 chk->send_size = space_needed;
8647 /* Setup the chunk */
8648 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
8649 fwdtsn->ch.chunk_length = htons(chk->send_size);
8650 fwdtsn->ch.chunk_flags = 0;
8651 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
8652 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point);
8653 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) +
8654 (cnt_of_skipped * sizeof(struct sctp_strseq)));
8655 SCTP_BUF_LEN(chk->data) = chk->send_size;
8656 fwdtsn++;
8657 /*
8658 * Move pointer to after the fwdtsn and transfer to the
8659 * strseq pointer.
8660 */
8661 strseq = (struct sctp_strseq *)fwdtsn;
8662 /*
8663 * Now populate the strseq list. This is done blindly
8664 * without pulling out duplicate stream info. This is
8665 * inefficent but won't harm the process since the peer will
8666 * look at these in sequence and will thus release anything.
8667 * It could mean we exceed the PMTU and chop off some that
8668 * we could have included.. but this is unlikely (aka 1432/4
8669 * would mean 300+ stream seq's would have to be reported in
8670 * one FWD-TSN. With a bit of work we can later FIX this to
8671 * optimize and pull out duplcates.. but it does add more
8672 * overhead. So for now... not!
8673 */
8674 at = TAILQ_FIRST(&asoc->sent_queue);
8675 for (i = 0; i < cnt_of_skipped; i++) {
8676 tp1 = TAILQ_NEXT(at, sctp_next);
8677 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
8678 /* We don't report these */
8679 i--;
8680 at = tp1;
8681 continue;
8682 }
8683 strseq->stream = ntohs(at->rec.data.stream_number);
8684 strseq->sequence = ntohs(at->rec.data.stream_seq);
8685 strseq++;
8686 at = tp1;
8687 }
8688 }
8689 return;
8690
8691}
8692
8693void
8694sctp_send_sack(struct sctp_tcb *stcb)
8695{
8696 /*
8697 * Queue up a SACK in the control queue. We must first check to see
8698 * if a SACK is somehow on the control queue. If so, we will take
8699 * and and remove the old one.
8700 */
8701 struct sctp_association *asoc;
8702 struct sctp_tmit_chunk *chk, *a_chk;
8703 struct sctp_sack_chunk *sack;
8704 struct sctp_gap_ack_block *gap_descriptor;
8705 struct sack_track *selector;
8706 int mergeable = 0;
8707 int offset;
8708 caddr_t limit;
8709 uint32_t *dup;
8710 int limit_reached = 0;
8711 unsigned int i, jstart, siz, j;
8712 unsigned int num_gap_blocks = 0, space;
8713 int num_dups = 0;
8714 int space_req;
8715
8716
8717 a_chk = NULL;
8718 asoc = &stcb->asoc;
8719 SCTP_TCB_LOCK_ASSERT(stcb);
8720 if (asoc->last_data_chunk_from == NULL) {
8721 /* Hmm we never received anything */
8722 return;
8723 }
8724 sctp_set_rwnd(stcb, asoc);
8725 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8726 if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
8727 /* Hmm, found a sack already on queue, remove it */
8728 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
8729 asoc->ctrl_queue_cnt++;
8730 a_chk = chk;
8731 if (a_chk->data) {
8732 sctp_m_freem(a_chk->data);
8733 a_chk->data = NULL;
8734 }
8735 sctp_free_remote_addr(a_chk->whoTo);
8736 a_chk->whoTo = NULL;
8737 break;
8738 }
8739 }
8740 if (a_chk == NULL) {
8741 sctp_alloc_a_chunk(stcb, a_chk);
8742 if (a_chk == NULL) {
8743 /* No memory so we drop the idea, and set a timer */
7529 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7530 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
7531 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
7532 stcb->sctp_ep, stcb, NULL);
8744 if (stcb->asoc.delayed_ack) {
8745 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8746 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
8747 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
8748 stcb->sctp_ep, stcb, NULL);
8749 } else {
8750 stcb->asoc.send_sack = 1;
8751 }
7533 return;
7534 }
7535 a_chk->copy_by_ref = 0;
7536 /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
7537 a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
7538 a_chk->rec.chunk_id.can_take_data = 1;
7539 }
8752 return;
8753 }
8754 a_chk->copy_by_ref = 0;
8755 /* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
8756 a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
8757 a_chk->rec.chunk_id.can_take_data = 1;
8758 }
8759 /* Clear our pkt counts */
8760 asoc->data_pkts_seen = 0;
8761
7540 a_chk->asoc = asoc;
7541 a_chk->snd_count = 0;
7542 a_chk->send_size = 0; /* fill in later */
7543 a_chk->sent = SCTP_DATAGRAM_UNSENT;
7544
7545 if ((asoc->numduptsns) ||
7546 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
7547 ) {
7548 /*
7549 * Ok, we have some duplicates or the destination for the
7550 * sack is unreachable, lets see if we can select an
7551 * alternate than asoc->last_data_chunk_from
7552 */
7553 if ((!(asoc->last_data_chunk_from->dest_state &
7554 SCTP_ADDR_NOT_REACHABLE)) &&
7555 (asoc->used_alt_onsack > asoc->numnets)) {
7556 /* We used an alt last time, don't this time */
7557 a_chk->whoTo = NULL;
7558 } else {
7559 asoc->used_alt_onsack++;
7560 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
7561 }
7562 if (a_chk->whoTo == NULL) {
7563 /* Nope, no alternate */
7564 a_chk->whoTo = asoc->last_data_chunk_from;
7565 asoc->used_alt_onsack = 0;
7566 }
7567 } else {
7568 /*
7569 * No duplicates so we use the last place we received data
7570 * from.
7571 */
7572 asoc->used_alt_onsack = 0;
7573 a_chk->whoTo = asoc->last_data_chunk_from;
7574 }
7575 if (a_chk->whoTo) {
7576 atomic_add_int(&a_chk->whoTo->ref_count, 1);
7577 }
7578 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
7579 /* no gaps */
7580 space_req = sizeof(struct sctp_sack_chunk);
7581 } else {
7582 /* gaps get a cluster */
7583 space_req = MCLBYTES;
7584 }
7585 /* Ok now lets formulate a MBUF with our sack */
7586 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
7587 if ((a_chk->data == NULL) ||
7588 (a_chk->whoTo == NULL)) {
7589 /* rats, no mbuf memory */
7590 if (a_chk->data) {
7591 /* was a problem with the destination */
7592 sctp_m_freem(a_chk->data);
7593 a_chk->data = NULL;
7594 }
7595 if (a_chk->whoTo)
7596 atomic_subtract_int(&a_chk->whoTo->ref_count, 1);
7597 sctp_free_a_chunk(stcb, a_chk);
8762 a_chk->asoc = asoc;
8763 a_chk->snd_count = 0;
8764 a_chk->send_size = 0; /* fill in later */
8765 a_chk->sent = SCTP_DATAGRAM_UNSENT;
8766
8767 if ((asoc->numduptsns) ||
8768 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
8769 ) {
8770 /*
8771 * Ok, we have some duplicates or the destination for the
8772 * sack is unreachable, lets see if we can select an
8773 * alternate than asoc->last_data_chunk_from
8774 */
8775 if ((!(asoc->last_data_chunk_from->dest_state &
8776 SCTP_ADDR_NOT_REACHABLE)) &&
8777 (asoc->used_alt_onsack > asoc->numnets)) {
8778 /* We used an alt last time, don't this time */
8779 a_chk->whoTo = NULL;
8780 } else {
8781 asoc->used_alt_onsack++;
8782 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
8783 }
8784 if (a_chk->whoTo == NULL) {
8785 /* Nope, no alternate */
8786 a_chk->whoTo = asoc->last_data_chunk_from;
8787 asoc->used_alt_onsack = 0;
8788 }
8789 } else {
8790 /*
8791 * No duplicates so we use the last place we received data
8792 * from.
8793 */
8794 asoc->used_alt_onsack = 0;
8795 a_chk->whoTo = asoc->last_data_chunk_from;
8796 }
8797 if (a_chk->whoTo) {
8798 atomic_add_int(&a_chk->whoTo->ref_count, 1);
8799 }
8800 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
8801 /* no gaps */
8802 space_req = sizeof(struct sctp_sack_chunk);
8803 } else {
8804 /* gaps get a cluster */
8805 space_req = MCLBYTES;
8806 }
8807 /* Ok now lets formulate a MBUF with our sack */
8808 a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
8809 if ((a_chk->data == NULL) ||
8810 (a_chk->whoTo == NULL)) {
8811 /* rats, no mbuf memory */
8812 if (a_chk->data) {
8813 /* was a problem with the destination */
8814 sctp_m_freem(a_chk->data);
8815 a_chk->data = NULL;
8816 }
8817 if (a_chk->whoTo)
8818 atomic_subtract_int(&a_chk->whoTo->ref_count, 1);
8819 sctp_free_a_chunk(stcb, a_chk);
7598 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7599 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
7600 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
7601 stcb->sctp_ep, stcb, NULL);
8820 if (stcb->asoc.delayed_ack) {
8821 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8822 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
8823 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
8824 stcb->sctp_ep, stcb, NULL);
8825 } else {
8826 stcb->asoc.send_sack = 1;
8827 }
7602 return;
7603 }
7604 /* ok, lets go through and fill it in */
7605 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
7606 space = M_TRAILINGSPACE(a_chk->data);
7607 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
7608 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
7609 }
7610 limit = mtod(a_chk->data, caddr_t);
7611 limit += space;
7612
7613 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
7614 sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
7615 /* 0x01 is used by nonce for ecn */
7616 if ((sctp_ecn_enable) &&
7617 (sctp_ecn_nonce) &&
7618 (asoc->peer_supports_ecn_nonce))
7619 sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
7620 else
7621 sack->ch.chunk_flags = 0;
7622
7623 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
7624 /*
7625 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
7626 * received, then set high bit to 1, else 0. Reset
7627 * pkts_rcvd.
7628 */
7629 sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
7630 asoc->cmt_dac_pkts_rcvd = 0;
7631 }
7632 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
7633 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
7634 asoc->my_last_reported_rwnd = asoc->my_rwnd;
7635
7636 /* reset the readers interpretation */
7637 stcb->freed_by_sorcv_sincelast = 0;
7638
7639 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
7640
7641
7642 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
7643 if (asoc->cumulative_tsn < asoc->mapping_array_base_tsn) {
7644 offset = 1;
7645 /*
7646 * cum-ack behind the mapping array, so we start and use all
7647 * entries.
7648 */
7649 jstart = 0;
7650 } else {
7651 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
7652 /*
7653 * we skip the first one when the cum-ack is at or above the
7654 * mapping array base.
7655 */
7656 jstart = 1;
7657 }
7658 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
7659 /* we have a gap .. maybe */
7660 for (i = 0; i < siz; i++) {
7661 selector = &sack_array[asoc->mapping_array[i]];
7662 if (mergeable && selector->right_edge) {
7663 /*
7664 * Backup, left and right edges were ok to
7665 * merge.
7666 */
7667 num_gap_blocks--;
7668 gap_descriptor--;
7669 }
7670 if (selector->num_entries == 0)
7671 mergeable = 0;
7672 else {
7673 for (j = jstart; j < selector->num_entries; j++) {
7674 if (mergeable && selector->right_edge) {
7675 /*
7676 * do a merge by NOT setting
7677 * the left side
7678 */
7679 mergeable = 0;
7680 } else {
7681 /*
7682 * no merge, set the left
7683 * side
7684 */
7685 mergeable = 0;
7686 gap_descriptor->start = htons((selector->gaps[j].start + offset));
7687 }
7688 gap_descriptor->end = htons((selector->gaps[j].end + offset));
7689 num_gap_blocks++;
7690 gap_descriptor++;
7691 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
7692 /* no more room */
7693 limit_reached = 1;
7694 break;
7695 }
7696 }
7697 if (selector->left_edge) {
7698 mergeable = 1;
7699 }
7700 }
7701 if (limit_reached) {
7702 /* Reached the limit stop */
7703 break;
7704 }
7705 jstart = 0;
7706 offset += 8;
7707 }
7708 if (num_gap_blocks == 0) {
7709 /* reneged all chunks */
7710 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
7711 }
7712 }
7713 /* now we must add any dups we are going to report. */
7714 if ((limit_reached == 0) && (asoc->numduptsns)) {
7715 dup = (uint32_t *) gap_descriptor;
7716 for (i = 0; i < asoc->numduptsns; i++) {
7717 *dup = htonl(asoc->dup_tsns[i]);
7718 dup++;
7719 num_dups++;
7720 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
7721 /* no more room */
7722 break;
7723 }
7724 }
7725 asoc->numduptsns = 0;
7726 }
7727 /*
7728 * now that the chunk is prepared queue it to the control chunk
7729 * queue.
7730 */
7731 a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
7732 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
7733 (num_dups * sizeof(int32_t)));
7734 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
7735 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
7736 sack->sack.num_dup_tsns = htons(num_dups);
7737 sack->ch.chunk_length = htons(a_chk->send_size);
7738 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
7739 asoc->ctrl_queue_cnt++;
8828 return;
8829 }
8830 /* ok, lets go through and fill it in */
8831 SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
8832 space = M_TRAILINGSPACE(a_chk->data);
8833 if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
8834 space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
8835 }
8836 limit = mtod(a_chk->data, caddr_t);
8837 limit += space;
8838
8839 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
8840 sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
8841 /* 0x01 is used by nonce for ecn */
8842 if ((sctp_ecn_enable) &&
8843 (sctp_ecn_nonce) &&
8844 (asoc->peer_supports_ecn_nonce))
8845 sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
8846 else
8847 sack->ch.chunk_flags = 0;
8848
8849 if (sctp_cmt_on_off && sctp_cmt_use_dac) {
8850 /*
8851 * CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
8852 * received, then set high bit to 1, else 0. Reset
8853 * pkts_rcvd.
8854 */
8855 sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
8856 asoc->cmt_dac_pkts_rcvd = 0;
8857 }
8858 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
8859 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
8860 asoc->my_last_reported_rwnd = asoc->my_rwnd;
8861
8862 /* reset the readers interpretation */
8863 stcb->freed_by_sorcv_sincelast = 0;
8864
8865 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
8866
8867
8868 siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
8869 if (asoc->cumulative_tsn < asoc->mapping_array_base_tsn) {
8870 offset = 1;
8871 /*
8872 * cum-ack behind the mapping array, so we start and use all
8873 * entries.
8874 */
8875 jstart = 0;
8876 } else {
8877 offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
8878 /*
8879 * we skip the first one when the cum-ack is at or above the
8880 * mapping array base.
8881 */
8882 jstart = 1;
8883 }
8884 if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
8885 /* we have a gap .. maybe */
8886 for (i = 0; i < siz; i++) {
8887 selector = &sack_array[asoc->mapping_array[i]];
8888 if (mergeable && selector->right_edge) {
8889 /*
8890 * Backup, left and right edges were ok to
8891 * merge.
8892 */
8893 num_gap_blocks--;
8894 gap_descriptor--;
8895 }
8896 if (selector->num_entries == 0)
8897 mergeable = 0;
8898 else {
8899 for (j = jstart; j < selector->num_entries; j++) {
8900 if (mergeable && selector->right_edge) {
8901 /*
8902 * do a merge by NOT setting
8903 * the left side
8904 */
8905 mergeable = 0;
8906 } else {
8907 /*
8908 * no merge, set the left
8909 * side
8910 */
8911 mergeable = 0;
8912 gap_descriptor->start = htons((selector->gaps[j].start + offset));
8913 }
8914 gap_descriptor->end = htons((selector->gaps[j].end + offset));
8915 num_gap_blocks++;
8916 gap_descriptor++;
8917 if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
8918 /* no more room */
8919 limit_reached = 1;
8920 break;
8921 }
8922 }
8923 if (selector->left_edge) {
8924 mergeable = 1;
8925 }
8926 }
8927 if (limit_reached) {
8928 /* Reached the limit stop */
8929 break;
8930 }
8931 jstart = 0;
8932 offset += 8;
8933 }
8934 if (num_gap_blocks == 0) {
8935 /* reneged all chunks */
8936 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
8937 }
8938 }
8939 /* now we must add any dups we are going to report. */
8940 if ((limit_reached == 0) && (asoc->numduptsns)) {
8941 dup = (uint32_t *) gap_descriptor;
8942 for (i = 0; i < asoc->numduptsns; i++) {
8943 *dup = htonl(asoc->dup_tsns[i]);
8944 dup++;
8945 num_dups++;
8946 if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
8947 /* no more room */
8948 break;
8949 }
8950 }
8951 asoc->numduptsns = 0;
8952 }
8953 /*
8954 * now that the chunk is prepared queue it to the control chunk
8955 * queue.
8956 */
8957 a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
8958 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
8959 (num_dups * sizeof(int32_t)));
8960 SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
8961 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
8962 sack->sack.num_dup_tsns = htons(num_dups);
8963 sack->ch.chunk_length = htons(a_chk->send_size);
8964 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
8965 asoc->ctrl_queue_cnt++;
8966 asoc->send_sack = 0;
7740 SCTP_STAT_INCR(sctps_sendsacks);
7741 return;
7742}
7743
7744
/*
 * Build and send an ABORT chunk for an existing association (TCB).
 * 'operr' is an optional mbuf chain of error causes; it is linked in
 * after the ABORT chunk header and its total length is folded into the
 * chunk length.  When the peer requires ABORT to be authenticated, an
 * AUTH chunk is prepended to the chain first.
 */
void
sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
{
	struct mbuf *m_abort;
	struct mbuf *m_out = NULL, *m_end = NULL;
	struct sctp_abort_chunk *abort = NULL;
	int sz;
	uint32_t auth_offset = 0;
	struct sctp_auth_chunk *auth = NULL;
	struct sctphdr *shdr;

	/*
	 * Add an AUTH chunk, if chunk requires it and save the offset into
	 * the chain for AUTH
	 */
	if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
	    stcb->asoc.peer_auth_chunks)) {
		m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
		    stcb, SCTP_ABORT_ASSOCIATION);
	}
	SCTP_TCB_LOCK_ASSERT(stcb);
	m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
	if (m_abort == NULL) {
		/* no mbuf's; release any AUTH chain we already built */
		if (m_out)
			sctp_m_freem(m_out);
		return;
	}
	/* link in any error */
	SCTP_BUF_NEXT(m_abort) = operr;
	/* total length of the error-cause chain, needed for chunk_length */
	sz = 0;
	if (operr) {
		struct mbuf *n;

		n = operr;
		while (n) {
			sz += SCTP_BUF_LEN(n);
			n = SCTP_BUF_NEXT(n);
		}
	}
	SCTP_BUF_LEN(m_abort) = sizeof(*abort);
	if (m_out == NULL) {
		/* NO Auth chunk prepended, so reserve space in front */
		SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
		m_out = m_abort;
	} else {
		/* Put AUTH chunk at the front of the chain */
		SCTP_BUF_NEXT(m_end) = m_abort;
	}

	/* fill in the ABORT chunk */
	abort = mtod(m_abort, struct sctp_abort_chunk *);
	abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
	abort->ch.chunk_flags = 0;
	abort->ch.chunk_length = htons(sizeof(*abort) + sz);

	/* prepend and fill in the SCTP header */
	SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT);
	if (m_out == NULL) {
		/*
		 * TSNH: no memory.  NOTE(review): presumably the prepend
		 * macro (M_PREPEND) freed the chain on failure, so there is
		 * nothing left to clean up here — confirm.
		 */
		return;
	}
	shdr = mtod(m_out, struct sctphdr *);
	shdr->src_port = stcb->sctp_ep->sctp_lport;
	shdr->dest_port = stcb->rport;
	shdr->v_tag = htonl(stcb->asoc.peer_vtag);
	shdr->checksum = 0;
	/* the AUTH chunk moved forward by the prepended common header */
	auth_offset += sizeof(struct sctphdr);

	sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
	    stcb->asoc.primary_destination,
	    (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
	    m_out, auth_offset, auth, 1, 0, NULL, 0);
	SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
}
7820
7821int
7822sctp_send_shutdown_complete(struct sctp_tcb *stcb,
7823 struct sctp_nets *net)
7824{
7825 /* formulate and SEND a SHUTDOWN-COMPLETE */
7826 struct mbuf *m_shutdown_comp;
7827 struct sctp_shutdown_complete_msg *comp_cp;
7828
7829 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER);
7830 if (m_shutdown_comp == NULL) {
7831 /* no mbuf's */
7832 return (-1);
7833 }
7834 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
7835 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
7836 comp_cp->shut_cmp.ch.chunk_flags = 0;
7837 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
7838 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
7839 comp_cp->sh.dest_port = stcb->rport;
7840 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
7841 comp_cp->sh.checksum = 0;
7842
7843 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
7844 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
7845 (struct sockaddr *)&net->ro._l_addr,
7846 m_shutdown_comp, 0, NULL, 1, 0, NULL, 0);
7847 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7848 return (0);
7849}
7850
7851int
7852sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh)
7853{
7854 /* formulate and SEND a SHUTDOWN-COMPLETE */
7855 struct mbuf *o_pak;
7856 struct mbuf *mout;
7857 struct ip *iph, *iph_out;
7858 struct ip6_hdr *ip6, *ip6_out;
7859 int offset_out, len;
7860 struct sctp_shutdown_complete_msg *comp_cp;
7861
7862 /* Get room for the largest message */
7863 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
7864
7865 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(len);
7866 if (o_pak == NULL) {
7867 /* no mbuf's */
7868 return (-1);
7869 }
7870 mout = SCTP_HEADER_TO_CHAIN(o_pak);
7871 iph = mtod(m, struct ip *);
7872 iph_out = NULL;
7873 ip6_out = NULL;
7874 offset_out = 0;
7875 if (iph->ip_v == IPVERSION) {
7876 SCTP_BUF_LEN(mout) = sizeof(struct ip) +
7877 sizeof(struct sctp_shutdown_complete_msg);
7878 SCTP_BUF_NEXT(mout) = NULL;
7879 iph_out = mtod(mout, struct ip *);
7880
7881 /* Fill in the IP header for the ABORT */
7882 iph_out->ip_v = IPVERSION;
7883 iph_out->ip_hl = (sizeof(struct ip) / 4);
7884 iph_out->ip_tos = (u_char)0;
7885 iph_out->ip_id = 0;
7886 iph_out->ip_off = 0;
7887 iph_out->ip_ttl = MAXTTL;
7888 iph_out->ip_p = IPPROTO_SCTP;
7889 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
7890 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
7891
7892 /* let IP layer calculate this */
7893 iph_out->ip_sum = 0;
7894 offset_out += sizeof(*iph_out);
7895 comp_cp = (struct sctp_shutdown_complete_msg *)(
7896 (caddr_t)iph_out + offset_out);
7897 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
7898 ip6 = (struct ip6_hdr *)iph;
7899 SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr) +
7900 sizeof(struct sctp_shutdown_complete_msg);
7901 SCTP_BUF_NEXT(mout) = NULL;
7902 ip6_out = mtod(mout, struct ip6_hdr *);
7903
7904 /* Fill in the IPv6 header for the ABORT */
7905 ip6_out->ip6_flow = ip6->ip6_flow;
7906 ip6_out->ip6_hlim = ip6_defhlim;
7907 ip6_out->ip6_nxt = IPPROTO_SCTP;
7908 ip6_out->ip6_src = ip6->ip6_dst;
7909 ip6_out->ip6_dst = ip6->ip6_src;
7910 /*
7911 * ?? The old code had both the iph len + payload, I think
7912 * this is wrong and would never have worked
7913 */
7914 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
7915 offset_out += sizeof(*ip6_out);
7916 comp_cp = (struct sctp_shutdown_complete_msg *)(
7917 (caddr_t)ip6_out + offset_out);
7918 } else {
7919 /* Currently not supported. */
7920 return (-1);
7921 }
7922
7923 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout);
7924 /* Now copy in and fill in the ABORT tags etc. */
7925 comp_cp->sh.src_port = sh->dest_port;
7926 comp_cp->sh.dest_port = sh->src_port;
7927 comp_cp->sh.checksum = 0;
7928 comp_cp->sh.v_tag = sh->v_tag;
7929 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
7930 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
7931 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
7932
7933 /* add checksum */
7934 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(o_pak)) {
7935 comp_cp->sh.checksum = 0;
7936 } else {
7937 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
7938 }
7939 if (iph_out != NULL) {
7940 struct route ro;
7941
7942 bzero(&ro, sizeof ro);
7943 /* set IPv4 length */
7944 iph_out->ip_len = SCTP_HEADER_LEN(o_pak);
7945 /* out it goes */
7946 ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
7947 ,NULL
7948 );
7949 /* Free the route if we got one back */
7950 if (ro.ro_rt)
7951 RTFREE(ro.ro_rt);
7952 } else if (ip6_out != NULL) {
7953 struct route_in6 ro;
7954
7955 bzero(&ro, sizeof(ro));
7956 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
7957 ,NULL
7958 );
7959 /* Free the route if we got one back */
7960 if (ro.ro_rt)
7961 RTFREE(ro.ro_rt);
7962 }
7963 SCTP_STAT_INCR(sctps_sendpackets);
7964 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
7965 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
7966 return (0);
7967}
7968
/*
 * Pick the destination address most in need of a heartbeat.  Fills in
 * *now (via SCTP_GETTIME_TIMEVAL) as a side effect, updates the
 * winner's last_sent_time, and returns it — or NULL when no address
 * currently qualifies for a HB.
 */
static struct sctp_nets *
sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
{
	struct sctp_nets *net, *hnet;
	int ms_goneby, highest_ms, state_overide = 0;

	SCTP_GETTIME_TIMEVAL(now);
	highest_ms = 0;
	hnet = NULL;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (
		    ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
		    (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
		    ) {
			/*
			 * Skip this guy from consideration if HB is off AND
			 * its confirmed
			 */
			continue;
		}
		if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
			/* skip this dest net from consideration */
			continue;
		}
		if (net->last_sent_time.tv_sec) {
			/* Sent to so we subtract */
			ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
		} else
			/* Never been sent to */
			ms_goneby = 0x7fffffff;
		/*
		 * When the address state is unconfirmed but still
		 * considered reachable, we HB at a higher rate. Once it
		 * goes confirmed OR reaches the "unreachable" state, then
		 * we cut it back to HB at a more normal pace.
		 */
		if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
			state_overide = 1;
		} else {
			state_overide = 0;
		}

		/* track the longest-idle candidate that is due for a HB */
		if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
		    (ms_goneby > highest_ms)) {
			highest_ms = ms_goneby;
			hnet = net;
		}
	}
	/* re-derive the override for the chosen candidate (if any) */
	if (hnet &&
	    ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
		state_overide = 1;
	} else {
		state_overide = 0;
	}

	/*
	 * highest_ms != 0 implies hnet != NULL (they are set together
	 * above), so the hnet->RTO dereference is safe here.
	 */
	if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
		/*
		 * Found the one with longest delay bounds OR it is
		 * unconfirmed and still not marked unreachable.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
			printf("net:%p is the hb winner -",
			    hnet);
			if (hnet)
				sctp_print_address((struct sockaddr *)&hnet->ro._l_addr);
			else
				printf(" none\n");
		}
#endif
		/* update the timer now */
		hnet->last_sent_time = *now;
		return (hnet);
	}
	/* Nothing to HB */
	return (NULL);
}
8047
/*
 * Build and queue a HEARTBEAT chunk.  With user_req == 0 the target is
 * chosen by sctp_select_hb_destination() (which also fills 'now');
 * otherwise the caller-supplied 'u_net' is used and the time is taken
 * here.  Returns 1 when a HB was queued, 0 when there was nothing to
 * do (or allocation failed), and -1 when threshold management declared
 * the association failed.
 */
int
sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *net;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (user_req == 0) {
		net = sctp_select_hb_destination(stcb, &now);
		if (net == NULL) {
			/*
			 * All our busy none to send to, just start the
			 * timer again.
			 */
			if (stcb->asoc.state == 0) {
				return (0);
			}
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep,
			    stcb,
			    net);
			return (0);
		}
	} else {
		net = u_net;
		if (net == NULL) {
			return (0);
		}
		SCTP_GETTIME_TIMEVAL(&now);
	}
	/* only IPv4 and IPv6 destinations can be heartbeated */
	sin = (struct sockaddr_in *)&net->ro._l_addr;
	if (sin->sin_family != AF_INET) {
		if (sin->sin_family != AF_INET6) {
			/* huh */
			return (0);
		}
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
			printf("Gak, can't get a chunk for hb\n");
		}
#endif
		return (0);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (0);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	/* chunk holds a reference on the destination net */
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);

	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	/* timestamp echoed back in the HB-ACK to compute RTT */
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.user_req = user_req;
	hb->heartbeat.hb_info.addr_family = sin->sin_family;
	hb->heartbeat.hb_info.addr_len = sin->sin_len;
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	if (sin->sin_family == AF_INET) {
		memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
	} else if (sin->sin_family == AF_INET6) {
		/* We leave the scope the way it is in our lookup table. */
		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
		memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
	} else {
		/*
		 * huh compiler bug.  NOTE(review): this branch is
		 * unreachable given the family check above; if it were
		 * reachable it would leak chk, its data mbuf, and the net
		 * ref_count taken earlier — confirm before relying on it.
		 */
		return (0);
	}
	/* ok we have a destination that needs a beat */
	/* lets do the theshold management Qiaobing style */

	if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
	    stcb->asoc.max_send_times)) {
		/*
		 * we have lost the association, in a way this is quite bad
		 * since we really are one less time since we really did not
		 * send yet. This is the down side to the Q's style as
		 * defined in the RFC and not my alternate style defined in
		 * the RFC.
		 */
		atomic_subtract_int(&chk->whoTo->ref_count, 1);
		if (chk->data != NULL) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (-1);
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	/*
	 * Call directly med level routine to put out the chunk. It will
	 * always tumble out control chunks aka HB but it may even tumble
	 * out data too.
	 */
	return (1);
}
8183
8184void
8185sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
8186 uint32_t high_tsn)
8187{
8188 struct sctp_association *asoc;
8189 struct sctp_ecne_chunk *ecne;
8190 struct sctp_tmit_chunk *chk;
8191
8192 asoc = &stcb->asoc;
8193 SCTP_TCB_LOCK_ASSERT(stcb);
8194 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8195 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
8196 /* found a previous ECN_ECHO update it if needed */
8197 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
8198 ecne->tsn = htonl(high_tsn);
8199 return;
8200 }
8201 }
8202 /* nope could not find one to update so we must build one */
8203 sctp_alloc_a_chunk(stcb, chk);
8204 if (chk == NULL) {
8205 return;
8206 }
8207 chk->copy_by_ref = 0;
8208 SCTP_STAT_INCR(sctps_sendecne);
8209 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
8210 chk->rec.chunk_id.can_take_data = 0;
8211 chk->asoc = &stcb->asoc;
8212 chk->send_size = sizeof(struct sctp_ecne_chunk);
8213 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
8214 if (chk->data == NULL) {
8215 sctp_free_a_chunk(stcb, chk);
8216 return;
8217 }
8218 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
8219 SCTP_BUF_LEN(chk->data) = chk->send_size;
8220 chk->sent = SCTP_DATAGRAM_UNSENT;
8221 chk->snd_count = 0;
8222 chk->whoTo = net;
8223 atomic_add_int(&chk->whoTo->ref_count, 1);
8224 stcb->asoc.ecn_echo_cnt_onq++;
8225 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
8226 ecne->ch.chunk_type = SCTP_ECN_ECHO;
8227 ecne->ch.chunk_flags = 0;
8228 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
8229 ecne->tsn = htonl(high_tsn);
8230 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8231 asoc->ctrl_queue_cnt++;
8232}
8233
8234void
8235sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
8236 struct mbuf *m, int iphlen, int bad_crc)
8237{
8238 struct sctp_association *asoc;
8239 struct sctp_pktdrop_chunk *drp;
8240 struct sctp_tmit_chunk *chk;
8241 uint8_t *datap;
8242 int len;
8243 unsigned int small_one;
8244 struct ip *iph;
8245
8246 long spc;
8247
8248 asoc = &stcb->asoc;
8249 SCTP_TCB_LOCK_ASSERT(stcb);
8250 if (asoc->peer_supports_pktdrop == 0) {
8251 /*
8252 * peer must declare support before I send one.
8253 */
8254 return;
8255 }
8256 if (stcb->sctp_socket == NULL) {
8257 return;
8258 }
8259 sctp_alloc_a_chunk(stcb, chk);
8260 if (chk == NULL) {
8261 return;
8262 }
8263 chk->copy_by_ref = 0;
8264 iph = mtod(m, struct ip *);
8265 if (iph == NULL) {
8266 return;
8267 }
8268 if (iph->ip_v == IPVERSION) {
8269 /* IPv4 */
8270 len = chk->send_size = iph->ip_len;
8271 } else {
8272 struct ip6_hdr *ip6h;
8273
8274 /* IPv6 */
8275 ip6h = mtod(m, struct ip6_hdr *);
8276 len = chk->send_size = htons(ip6h->ip6_plen);
8277 }
8278 chk->asoc = &stcb->asoc;
8279 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
8280 if (chk->data == NULL) {
8281jump_out:
8282 sctp_free_a_chunk(stcb, chk);
8283 return;
8284 }
8285 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
8286 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
8287 if (drp == NULL) {
8288 sctp_m_freem(chk->data);
8289 chk->data = NULL;
8290 goto jump_out;
8291 }
8292 small_one = asoc->smallest_mtu;
8293 if (small_one > MCLBYTES) {
8294 /* Only one cluster worth of data MAX */
8295 small_one = MCLBYTES;
8296 }
8297 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
8298 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
8299 chk->book_size_scale = 0;
8300 if (chk->book_size > small_one) {
8301 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
8302 drp->trunc_len = htons(chk->send_size);
8303 chk->send_size = small_one - (SCTP_MED_OVERHEAD +
8304 sizeof(struct sctp_pktdrop_chunk) +
8305 sizeof(struct sctphdr));
8306 len = chk->send_size;
8307 } else {
8308 /* no truncation needed */
8309 drp->ch.chunk_flags = 0;
8310 drp->trunc_len = htons(0);
8311 }
8312 if (bad_crc) {
8313 drp->ch.chunk_flags |= SCTP_BADCRC;
8314 }
8315 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
8316 SCTP_BUF_LEN(chk->data) = chk->send_size;
8317 chk->sent = SCTP_DATAGRAM_UNSENT;
8318 chk->snd_count = 0;
8319 if (net) {
8320 /* we should hit here */
8321 chk->whoTo = net;
8322 } else {
8323 chk->whoTo = asoc->primary_destination;
8324 }
8325 atomic_add_int(&chk->whoTo->ref_count, 1);
8326 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
8327 chk->rec.chunk_id.can_take_data = 1;
8328 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
8329 drp->ch.chunk_length = htons(chk->send_size);
8330 spc = stcb->sctp_socket->so_rcv.sb_hiwat;
8331 if (spc < 0) {
8332 spc = 0;
8333 }
8334 drp->bottle_bw = htonl(spc);
8335 if (asoc->my_rwnd) {
8336 drp->current_onq = htonl(asoc->size_on_reasm_queue +
8337 asoc->size_on_all_streams +
8338 asoc->my_rwnd_control_len +
8339 stcb->sctp_socket->so_rcv.sb_cc);
8340 } else {
8341 /*
8342 * If my rwnd is 0, possibly from mbuf depletion as well as
8343 * space used, tell the peer there is NO space aka onq == bw
8344 */
8345 drp->current_onq = htonl(spc);
8346 }
8347 drp->reserved = 0;
8348 datap = drp->data;
8349 m_copydata(m, iphlen, len, (caddr_t)datap);
8350 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8351 asoc->ctrl_queue_cnt++;
8352}
8353
8354void
8355sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
8356{
8357 struct sctp_association *asoc;
8358 struct sctp_cwr_chunk *cwr;
8359 struct sctp_tmit_chunk *chk;
8360
8361 asoc = &stcb->asoc;
8362 SCTP_TCB_LOCK_ASSERT(stcb);
8363 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8364 if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
8365 /* found a previous ECN_CWR update it if needed */
8366 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
8367 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
8368 MAX_TSN)) {
8369 cwr->tsn = htonl(high_tsn);
8370 }
8371 return;
8372 }
8373 }
8374 /* nope could not find one to update so we must build one */
8375 sctp_alloc_a_chunk(stcb, chk);
8376 if (chk == NULL) {
8377 return;
8378 }
8379 chk->copy_by_ref = 0;
8380 chk->rec.chunk_id.id = SCTP_ECN_CWR;
8381 chk->rec.chunk_id.can_take_data = 1;
8382 chk->asoc = &stcb->asoc;
8383 chk->send_size = sizeof(struct sctp_cwr_chunk);
8384 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
8385 if (chk->data == NULL) {
8386 sctp_free_a_chunk(stcb, chk);
8387 return;
8388 }
8389 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
8390 SCTP_BUF_LEN(chk->data) = chk->send_size;
8391 chk->sent = SCTP_DATAGRAM_UNSENT;
8392 chk->snd_count = 0;
8393 chk->whoTo = net;
8394 atomic_add_int(&chk->whoTo->ref_count, 1);
8395 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
8396 cwr->ch.chunk_type = SCTP_ECN_CWR;
8397 cwr->ch.chunk_flags = 0;
8398 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
8399 cwr->tsn = htonl(high_tsn);
8400 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8401 asoc->ctrl_queue_cnt++;
8402}
8403
8404void
8405sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
8406 int number_entries, uint16_t * list,
8407 uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
8408{
8409 int len, old_len, i;
8410 struct sctp_stream_reset_out_request *req_out;
8411 struct sctp_chunkhdr *ch;
8412
8413 ch = mtod(chk->data, struct sctp_chunkhdr *);
8414
8415
8416 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
8417
8418 /* get to new offset for the param. */
8419 req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
8420 /* now how long will this param be? */
8421 len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
8422 req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
8423 req_out->ph.param_length = htons(len);
8424 req_out->request_seq = htonl(seq);
8425 req_out->response_seq = htonl(resp_seq);
8426 req_out->send_reset_at_tsn = htonl(last_sent);
8427 if (number_entries) {
8428 for (i = 0; i < number_entries; i++) {
8429 req_out->list_of_streams[i] = htons(list[i]);
8430 }
8431 }
8432 if (SCTP_SIZE32(len) > len) {
8433 /*
8434 * Need to worry about the pad we may end up adding to the
8435 * end. This is easy since the struct is either aligned to 4
8436 * bytes or 2 bytes off.
8437 */
8438 req_out->list_of_streams[number_entries] = 0;
8439 }
8440 /* now fix the chunk length */
8441 ch->chunk_length = htons(len + old_len);
8442 chk->book_size = len + old_len;
8443 chk->book_size_scale = 0;
8444 chk->send_size = SCTP_SIZE32(chk->book_size);
8445 SCTP_BUF_LEN(chk->data) = chk->send_size;
8446 return;
8447}
8448
8449
8450void
8451sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
8452 int number_entries, uint16_t * list,
8453 uint32_t seq)
8454{
8455 int len, old_len, i;
8456 struct sctp_stream_reset_in_request *req_in;
8457 struct sctp_chunkhdr *ch;
8458
8459 ch = mtod(chk->data, struct sctp_chunkhdr *);
8460
8461
8462 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
8463
8464 /* get to new offset for the param. */
8465 req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
8466 /* now how long will this param be? */
8467 len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
8468 req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
8469 req_in->ph.param_length = htons(len);
8470 req_in->request_seq = htonl(seq);
8471 if (number_entries) {
8472 for (i = 0; i < number_entries; i++) {
8473 req_in->list_of_streams[i] = htons(list[i]);
8474 }
8475 }
8476 if (SCTP_SIZE32(len) > len) {
8477 /*
8478 * Need to worry about the pad we may end up adding to the
8479 * end. This is easy since the struct is either aligned to 4
8480 * bytes or 2 bytes off.
8481 */
8482 req_in->list_of_streams[number_entries] = 0;
8483 }
8484 /* now fix the chunk length */
8485 ch->chunk_length = htons(len + old_len);
8486 chk->book_size = len + old_len;
8487 chk->book_size_scale = 0;
8488 chk->send_size = SCTP_SIZE32(chk->book_size);
8489 SCTP_BUF_LEN(chk->data) = chk->send_size;
8490 return;
8491}
8492
8493
8494void
8495sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
8496 uint32_t seq)
8497{
8498 int len, old_len;
8499 struct sctp_stream_reset_tsn_request *req_tsn;
8500 struct sctp_chunkhdr *ch;
8501
8502 ch = mtod(chk->data, struct sctp_chunkhdr *);
8503
8504
8505 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
8506
8507 /* get to new offset for the param. */
8508 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
8509 /* now how long will this param be? */
8510 len = sizeof(struct sctp_stream_reset_tsn_request);
8511 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
8512 req_tsn->ph.param_length = htons(len);
8513 req_tsn->request_seq = htonl(seq);
8514
8515 /* now fix the chunk length */
8516 ch->chunk_length = htons(len + old_len);
8517 chk->send_size = len + old_len;
8518 chk->book_size = SCTP_SIZE32(chk->send_size);
8519 chk->book_size_scale = 0;
8520 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
8521 return;
8522}
8523
8524void
8525sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
8526 uint32_t resp_seq, uint32_t result)
8527{
8528 int len, old_len;
8529 struct sctp_stream_reset_response *resp;
8530 struct sctp_chunkhdr *ch;
8531
8532 ch = mtod(chk->data, struct sctp_chunkhdr *);
8533
8534
8535 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
8536
8537 /* get to new offset for the param. */
8538 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
8539 /* now how long will this param be? */
8540 len = sizeof(struct sctp_stream_reset_response);
8541 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
8542 resp->ph.param_length = htons(len);
8543 resp->response_seq = htonl(resp_seq);
8544 resp->result = ntohl(result);
8545
8546 /* now fix the chunk length */
8547 ch->chunk_length = htons(len + old_len);
8548 chk->book_size = len + old_len;
8549 chk->book_size_scale = 0;
8550 chk->send_size = SCTP_SIZE32(chk->book_size);
8551 SCTP_BUF_LEN(chk->data) = chk->send_size;
8552 return;
8553
8554}
8555
8556
8557void
8558sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
8559 uint32_t resp_seq, uint32_t result,
8560 uint32_t send_una, uint32_t recv_next)
8561{
8562 int len, old_len;
8563 struct sctp_stream_reset_response_tsn *resp;
8564 struct sctp_chunkhdr *ch;
8565
8566 ch = mtod(chk->data, struct sctp_chunkhdr *);
8567
8568
8569 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
8570
8571 /* get to new offset for the param. */
8572 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
8573 /* now how long will this param be? */
8574 len = sizeof(struct sctp_stream_reset_response_tsn);
8575 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
8576 resp->ph.param_length = htons(len);
8577 resp->response_seq = htonl(resp_seq);
8578 resp->result = htonl(result);
8579 resp->senders_next_tsn = htonl(send_una);
8580 resp->receivers_next_tsn = htonl(recv_next);
8581
8582 /* now fix the chunk length */
8583 ch->chunk_length = htons(len + old_len);
8584 chk->book_size = len + old_len;
8585 chk->send_size = SCTP_SIZE32(chk->book_size);
8586 chk->book_size_scale = 0;
8587 SCTP_BUF_LEN(chk->data) = chk->send_size;
8588 return;
8589}
8590
8591
/*
 * Build a STREAM-RESET chunk carrying any combination of an outgoing-stream
 * reset request, an incoming-stream reset request, and/or a TSN reset
 * request, then queue it on the control send queue and start the
 * stream-reset timer.
 *
 * Returns 0 on success, EBUSY if a stream-reset request is already
 * outstanding, EINVAL for an empty or contradictory request combination,
 * or ENOMEM on allocation failure.
 */
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list,
    uint8_t send_out_req, uint32_t resp_seq,
    uint8_t send_in_req,
    uint8_t send_tsn_req)
{

	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	uint32_t seq;

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*
		 * Already one pending, must get ACK back to clear the flag.
		 */
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) {
		/* nothing to do */
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	/* Start with a bare chunk header; the sctp_add_stream_reset_*
	 * helpers below grow book_size/send_size as parameters are added. */
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (ENOMEM);
	}
	/* Leave room in front for IP/SCTP headers prepended at output time. */
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	/* Each embedded request consumes one request sequence number. */
	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		    seq, resp_seq, (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	/* Remember the pending request chunk for the ACK/timeout handlers. */
	asoc->str_reset = chk;

	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}
8677
8678void
8679sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
8680 struct mbuf *err_cause)
8681{
8682 /*
8683 * Formulate the abort message, and send it back down.
8684 */
8685 struct mbuf *o_pak;
8686 struct mbuf *mout;
8687 struct sctp_abort_msg *abm;
8688 struct ip *iph, *iph_out;
8689 struct ip6_hdr *ip6, *ip6_out;
8690 int iphlen_out;
8691
8692 /* don't respond to ABORT with ABORT */
8693 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
8694 if (err_cause)
8695 sctp_m_freem(err_cause);
8696 return;
8697 }
8698 o_pak = SCTP_GET_HEADER_FOR_OUTPUT((sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg)));
8699 if (o_pak == NULL) {
8700 if (err_cause)
8701 sctp_m_freem(err_cause);
8702 return;
8703 }
8704 mout = SCTP_HEADER_TO_CHAIN(o_pak);
8705 iph = mtod(m, struct ip *);
8706 iph_out = NULL;
8707 ip6_out = NULL;
8708 if (iph->ip_v == IPVERSION) {
8709 iph_out = mtod(mout, struct ip *);
8710 SCTP_BUF_LEN(mout) = sizeof(*iph_out) + sizeof(*abm);
8711 SCTP_BUF_NEXT(mout) = err_cause;
8712
8713 /* Fill in the IP header for the ABORT */
8714 iph_out->ip_v = IPVERSION;
8715 iph_out->ip_hl = (sizeof(struct ip) / 4);
8716 iph_out->ip_tos = (u_char)0;
8717 iph_out->ip_id = 0;
8718 iph_out->ip_off = 0;
8719 iph_out->ip_ttl = MAXTTL;
8720 iph_out->ip_p = IPPROTO_SCTP;
8721 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
8722 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
8723 /* let IP layer calculate this */
8724 iph_out->ip_sum = 0;
8725
8726 iphlen_out = sizeof(*iph_out);
8727 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
8728 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
8729 ip6 = (struct ip6_hdr *)iph;
8730 ip6_out = mtod(mout, struct ip6_hdr *);
8731 SCTP_BUF_LEN(mout) = sizeof(*ip6_out) + sizeof(*abm);
8732 SCTP_BUF_NEXT(mout) = err_cause;
8733
8734 /* Fill in the IP6 header for the ABORT */
8735 ip6_out->ip6_flow = ip6->ip6_flow;
8736 ip6_out->ip6_hlim = ip6_defhlim;
8737 ip6_out->ip6_nxt = IPPROTO_SCTP;
8738 ip6_out->ip6_src = ip6->ip6_dst;
8739 ip6_out->ip6_dst = ip6->ip6_src;
8740
8741 iphlen_out = sizeof(*ip6_out);
8742 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
8743 } else {
8744 /* Currently not supported */
8745 return;
8746 }
8747
8748 abm->sh.src_port = sh->dest_port;
8749 abm->sh.dest_port = sh->src_port;
8750 abm->sh.checksum = 0;
8751 if (vtag == 0) {
8752 abm->sh.v_tag = sh->v_tag;
8753 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
8754 } else {
8755 abm->sh.v_tag = htonl(vtag);
8756 abm->msg.ch.chunk_flags = 0;
8757 }
8758 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
8759
8760 if (err_cause) {
8761 struct mbuf *m_tmp = err_cause;
8762 int err_len = 0;
8763
8764 /* get length of the err_cause chain */
8765 while (m_tmp != NULL) {
8766 err_len += SCTP_BUF_LEN(m_tmp);
8767 m_tmp = SCTP_BUF_NEXT(m_tmp);
8768 }
8769 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout) + err_len;
8770 if (err_len % 4) {
8771 /* need pad at end of chunk */
8772 uint32_t cpthis = 0;
8773 int padlen;
8774
8775 padlen = 4 - (SCTP_HEADER_LEN(o_pak) % 4);
8776 m_copyback(mout, SCTP_HEADER_LEN(o_pak), padlen, (caddr_t)&cpthis);
8777 SCTP_HEADER_LEN(o_pak) += padlen;
8778 }
8779 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
8780 } else {
8781 SCTP_HEADER_LEN(mout) = SCTP_BUF_LEN(mout);
8782 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
8783 }
8784
8785 /* add checksum */
8786 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
8787 abm->sh.checksum = 0;
8788 } else {
8789 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
8790 }
8791 if (iph_out != NULL) {
8792 struct route ro;
8793
8794 /* zap the stack pointer to the route */
8795 bzero(&ro, sizeof ro);
8796#ifdef SCTP_DEBUG
8797 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
8798 printf("sctp_send_abort calling ip_output:\n");
8799 sctp_print_address_pkt(iph_out, &abm->sh);
8800 }
8801#endif
8802 /* set IPv4 length */
8803 iph_out->ip_len = SCTP_HEADER_LEN(o_pak);
8804 /* out it goes */
8805 (void)ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
8806 ,NULL
8807 );
8808 /* Free the route if we got one back */
8809 if (ro.ro_rt)
8810 RTFREE(ro.ro_rt);
8811 } else if (ip6_out != NULL) {
8812 struct route_in6 ro;
8813
8814 /* zap the stack pointer to the route */
8815 bzero(&ro, sizeof(ro));
8816#ifdef SCTP_DEBUG
8817 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
8818 printf("sctp_send_abort calling ip6_output:\n");
8819 sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh);
8820 }
8821#endif
8822 ip6_out->ip6_plen = SCTP_HEADER_LEN(o_pak) - sizeof(*ip6_out);
8823 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
8824 ,NULL
8825 );
8826 /* Free the route if we got one back */
8827 if (ro.ro_rt)
8828 RTFREE(ro.ro_rt);
8829 }
8830 SCTP_STAT_INCR(sctps_sendpackets);
8831 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
8832}
8833
/*
 * Build and transmit an OPERATION-ERROR packet in reply to the received
 * packet m (IP header length iphlen).  scm is an mbuf chain holding the
 * error cause(s); it is consumed by this function (prepended to, attached
 * to the output packet, or freed on failure).  vtag is placed in the SCTP
 * common header as-is (caller supplies it already in network byte order).
 */
void
sctp_send_operr_to(struct mbuf *m, int iphlen,
    struct mbuf *scm,
    uint32_t vtag)
{
	struct mbuf *o_pak;
	struct sctphdr *ihdr;
	int retcode;		/* NOTE(review): set below but never examined */
	struct sctphdr *ohdr;
	struct sctp_chunkhdr *ophdr;

	struct ip *iph;

#ifdef SCTP_DEBUG
	struct sockaddr_in6 lsa6, fsa6;

#endif
	uint32_t val;
	struct mbuf *at;
	int len;

	iph = mtod(m, struct ip *);
	ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);

	/* Make room in front of the causes for SCTP + chunk headers. */
	SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT);
	if (scm == NULL) {
		/* can't send because we can't add a mbuf */
		return;
	}
	/* SCTP common header: ports swapped from the received packet. */
	ohdr = mtod(scm, struct sctphdr *);
	ohdr->src_port = ihdr->dest_port;
	ohdr->dest_port = ihdr->src_port;
	ohdr->v_tag = vtag;
	ohdr->checksum = 0;
	ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
	ophdr->chunk_type = SCTP_OPERATION_ERROR;
	ophdr->chunk_flags = 0;
	/* Total length of headers plus all error causes. */
	len = 0;
	at = scm;
	while (at) {
		len += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}

	ophdr->chunk_length = htons(len - sizeof(struct sctphdr));
	if (len % 4) {
		/* need padding */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (len % 4);
		m_copyback(scm, len, padlen, (caddr_t)&cpthis);
		len += padlen;
	}
	/* Checksum covers the SCTP header and everything after it. */
	if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
		val = 0;
	} else {
		val = sctp_calculate_sum(scm, NULL, 0);
	}
	ohdr->checksum = val;
	if (iph->ip_v == IPVERSION) {
		/* V4 */
		struct ip *out;
		struct route ro;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
		len += sizeof(struct ip);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);
		bzero(&ro, sizeof ro);
		/* IPv4 header: addresses swapped, kernel fills checksum. */
		out = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *);
		out->ip_v = iph->ip_v;
		out->ip_hl = (sizeof(struct ip) / 4);
		out->ip_tos = iph->ip_tos;
		out->ip_id = iph->ip_id;
		out->ip_off = 0;
		out->ip_ttl = MAXTTL;
		out->ip_p = IPPROTO_SCTP;
		out->ip_sum = 0;
		out->ip_src = iph->ip_dst;
		out->ip_dst = iph->ip_src;
		out->ip_len = SCTP_HEADER_LEN(o_pak);
		retcode = ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	} else {
		/* V6 */
		struct route_in6 ro;
		struct ip6_hdr *out6, *in6;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
		len += sizeof(struct ip6_hdr);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);

		bzero(&ro, sizeof ro);
		/* IPv6 header: addresses swapped from the received packet. */
		in6 = mtod(m, struct ip6_hdr *);
		out6 = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *);
		out6->ip6_flow = in6->ip6_flow;
		out6->ip6_hlim = ip6_defhlim;
		out6->ip6_nxt = IPPROTO_SCTP;
		out6->ip6_src = in6->ip6_dst;
		out6->ip6_dst = in6->ip6_src;
		out6->ip6_plen = len - sizeof(struct ip6_hdr);
#ifdef SCTP_DEBUG
		bzero(&lsa6, sizeof(lsa6));
		lsa6.sin6_len = sizeof(lsa6);
		lsa6.sin6_family = AF_INET6;
		lsa6.sin6_addr = out6->ip6_src;
		bzero(&fsa6, sizeof(fsa6));
		fsa6.sin6_len = sizeof(fsa6);
		fsa6.sin6_family = AF_INET6;
		fsa6.sin6_addr = out6->ip6_dst;
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
			printf("sctp_operr_to calling ipv6 output:\n");
			printf("src: ");
			sctp_print_address((struct sockaddr *)&lsa6);
			printf("dst ");
			sctp_print_address((struct sockaddr *)&fsa6);
		}
#endif				/* SCTP_DEBUG */
		ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
}
8978
8979
8980
8981static struct mbuf *
8982sctp_copy_resume(struct sctp_stream_queue_pending *sp,
8983 struct uio *uio,
8984 struct sctp_sndrcvinfo *srcv,
8985 int max_send_len,
8986 int user_marks_eor,
8987 int *error,
8988 uint32_t * sndout,
8989 struct mbuf **new_tail)
8990{
8991 int left, cancpy, willcpy;
8992 struct mbuf *m, *prev, *head;
8993
8994 left = min(uio->uio_resid, max_send_len);
8995 /* Always get a header just in case */
8996 head = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
8997 cancpy = M_TRAILINGSPACE(head);
8998 willcpy = min(cancpy, left);
8999 *error = uiomove(mtod(head, caddr_t), willcpy, uio);
9000 if (*error) {
9001 sctp_m_freem(head);
9002 return (NULL);
9003 }
9004 *sndout += willcpy;
9005 left -= willcpy;
9006 SCTP_BUF_LEN(head) = willcpy;
9007 m = head;
9008 *new_tail = head;
9009 while (left > 0) {
9010 /* move in user data */
9011 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
9012 if (SCTP_BUF_NEXT(m) == NULL) {
9013 sctp_m_freem(head);
9014 *new_tail = NULL;
9015 *error = ENOMEM;
9016 return (NULL);
9017 }
9018 prev = m;
9019 m = SCTP_BUF_NEXT(m);
9020 cancpy = M_TRAILINGSPACE(m);
9021 willcpy = min(cancpy, left);
9022 *error = uiomove(mtod(m, caddr_t), willcpy, uio);
9023 if (*error) {
9024 sctp_m_freem(head);
9025 *new_tail = NULL;
9026 *error = EFAULT;
9027 return (NULL);
9028 }
9029 SCTP_BUF_LEN(m) = willcpy;
9030 left -= willcpy;
9031 *sndout += willcpy;
9032 *new_tail = m;
9033 if (left == 0) {
9034 SCTP_BUF_NEXT(m) = NULL;
9035 }
9036 }
9037 return (head);
9038}
9039
/*
 * Copy sp->length bytes of user data from uio into a new mbuf chain,
 * reserving resv_upfront bytes of leading space in the first mbuf (for
 * the DATA chunk header).  On success stores the chain in sp->data, the
 * last mbuf in sp->tail_mbuf, the copied byte count in sp->length, and
 * returns 0.  On failure frees the partial chain and returns an errno.
 */
static int
sctp_copy_one(struct sctp_stream_queue_pending *sp,
    struct uio *uio,
    int resv_upfront)
{
	int left, cancpy, willcpy, error;
	struct mbuf *m, *head;
	int cpsz = 0;

	/* First one gets a header */
	left = sp->length;
	head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAIT, 0, MT_DATA);
	if (m == NULL) {
		return (ENOMEM);
	}
	/*
	 * Add this one for m in now, that way if the alloc fails we won't
	 * have a bad cnt.
	 */
	SCTP_BUF_RESV_UF(m, resv_upfront);
	cancpy = M_TRAILINGSPACE(m);
	willcpy = min(cancpy, left);
	while (left > 0) {
		/* move in user data */
		error = uiomove(mtod(m, caddr_t), willcpy, uio);
		if (error) {
			sctp_m_freem(head);
			return (error);
		}
		SCTP_BUF_LEN(m) = willcpy;
		left -= willcpy;
		cpsz += willcpy;
		if (left > 0) {
			/* more to copy: grow the chain by one mbuf */
			SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
			if (SCTP_BUF_NEXT(m) == NULL) {
				/*
				 * the head goes back to caller, he can free
				 * the rest
				 */
				sctp_m_freem(head);
				return (ENOMEM);
			}
			m = SCTP_BUF_NEXT(m);
			cancpy = M_TRAILINGSPACE(m);
			willcpy = min(cancpy, left);
		} else {
			/* done: remember the tail and terminate the chain */
			sp->tail_mbuf = m;
			SCTP_BUF_NEXT(m) = NULL;
		}
	}
	sp->data = head;
	sp->length = cpsz;
	return (0);
}
9094
9095
9096
9097static struct sctp_stream_queue_pending *
9098sctp_copy_it_in(struct sctp_tcb *stcb,
9099 struct sctp_association *asoc,
9100 struct sctp_sndrcvinfo *srcv,
9101 struct uio *uio,
9102 struct sctp_nets *net,
9103 int max_send_len,
9104 int user_marks_eor,
9105 int *errno,
9106 int non_blocking)
9107{
9108 /*
9109 * This routine must be very careful in its work. Protocol
9110 * processing is up and running so care must be taken to spl...()
9111 * when you need to do something that may effect the stcb/asoc. The
9112 * sb is locked however. When data is copied the protocol processing
9113 * should be enabled since this is a slower operation...
9114 */
9115 struct sctp_stream_queue_pending *sp = NULL;
9116 int resv_in_first;
9117
9118 *errno = 0;
9119 /*
9120 * Unless E_EOR mode is on, we must make a send FIT in one call.
9121 */
9122 if (((user_marks_eor == 0) && non_blocking) &&
9123 (uio->uio_resid > stcb->sctp_socket->so_snd.sb_hiwat)) {
9124 /* It will NEVER fit */
9125 *errno = EMSGSIZE;
9126 goto out_now;
9127 }
9128 /* Now can we send this? */
9129 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
9130 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
9131 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
9132 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9133 /* got data while shutting down */
9134 *errno = ECONNRESET;
9135 goto out_now;
9136 }
9137 sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending);
9138 if (sp == NULL) {
9139 *errno = ENOMEM;
9140 goto out_now;
9141 }
9142 SCTP_INCR_STRMOQ_COUNT();
9143 sp->act_flags = 0;
9144 sp->sinfo_flags = srcv->sinfo_flags;
9145 sp->timetolive = srcv->sinfo_timetolive;
9146 sp->ppid = srcv->sinfo_ppid;
9147 sp->context = srcv->sinfo_context;
9148 sp->strseq = 0;
9149 SCTP_GETTIME_TIMEVAL(&sp->ts);
9150
9151 sp->stream = srcv->sinfo_stream;
9152 sp->length = min(uio->uio_resid, max_send_len);
9153 if ((sp->length == uio->uio_resid) &&
9154 ((user_marks_eor == 0) ||
9155 (srcv->sinfo_flags & SCTP_EOF) ||
9156 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
9157 ) {
9158 sp->msg_is_complete = 1;
9159 } else {
9160 sp->msg_is_complete = 0;
9161 }
9162 sp->some_taken = 0;
9163 resv_in_first = sizeof(struct sctp_data_chunk);
9164 sp->data = sp->tail_mbuf = NULL;
9165 *errno = sctp_copy_one(sp, uio, resv_in_first);
9166 if (*errno) {
9167 sctp_free_a_strmoq(stcb, sp);
9168 sp->data = NULL;
9169 sp->net = NULL;
9170 sp = NULL;
9171 } else {
9172 if (sp->sinfo_flags & SCTP_ADDR_OVER) {
9173 sp->net = net;
9174 sp->addr_over = 1;
9175 } else {
9176 sp->net = asoc->primary_destination;
9177 sp->addr_over = 0;
9178 }
9179 atomic_add_int(&sp->net->ref_count, 1);
9180 sctp_set_prsctp_policy(stcb, sp);
9181 }
9182out_now:
9183 return (sp);
9184}
9185
9186
9187int
9188sctp_sosend(struct socket *so,
9189 struct sockaddr *addr,
9190 struct uio *uio,
9191 struct mbuf *top,
9192 struct mbuf *control,
9193 int flags
9194 ,
9195 struct thread *p
9196)
9197{
9198 struct sctp_inpcb *inp;
9199 int error, use_rcvinfo = 0;
9200 struct sctp_sndrcvinfo srcv;
9201
9202 inp = (struct sctp_inpcb *)so->so_pcb;
9203 if (control) {
9204 /* process cmsg snd/rcv info (maybe a assoc-id) */
9205 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
9206 sizeof(srcv))) {
9207 /* got one */
9208 use_rcvinfo = 1;
9209 }
9210 }
9211 error = sctp_lower_sosend(so, addr, uio, top, control, flags,
9212 use_rcvinfo, &srcv, p);
9213 return (error);
9214}
9215
9216
8967 SCTP_STAT_INCR(sctps_sendsacks);
8968 return;
8969}
8970
8971
8972void
8973sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
8974{
8975 struct mbuf *m_abort;
8976 struct mbuf *m_out = NULL, *m_end = NULL;
8977 struct sctp_abort_chunk *abort = NULL;
8978 int sz;
8979 uint32_t auth_offset = 0;
8980 struct sctp_auth_chunk *auth = NULL;
8981 struct sctphdr *shdr;
8982
8983 /*
8984 * Add an AUTH chunk, if chunk requires it and save the offset into
8985 * the chain for AUTH
8986 */
8987 if (sctp_auth_is_required_chunk(SCTP_ABORT_ASSOCIATION,
8988 stcb->asoc.peer_auth_chunks)) {
8989 m_out = sctp_add_auth_chunk(m_out, &m_end, &auth, &auth_offset,
8990 stcb, SCTP_ABORT_ASSOCIATION);
8991 }
8992 SCTP_TCB_LOCK_ASSERT(stcb);
8993 m_abort = sctp_get_mbuf_for_msg(sizeof(struct sctp_abort_chunk), 0, M_DONTWAIT, 1, MT_HEADER);
8994 if (m_abort == NULL) {
8995 /* no mbuf's */
8996 if (m_out)
8997 sctp_m_freem(m_out);
8998 return;
8999 }
9000 /* link in any error */
9001 SCTP_BUF_NEXT(m_abort) = operr;
9002 sz = 0;
9003 if (operr) {
9004 struct mbuf *n;
9005
9006 n = operr;
9007 while (n) {
9008 sz += SCTP_BUF_LEN(n);
9009 n = SCTP_BUF_NEXT(n);
9010 }
9011 }
9012 SCTP_BUF_LEN(m_abort) = sizeof(*abort);
9013 if (m_out == NULL) {
9014 /* NO Auth chunk prepended, so reserve space in front */
9015 SCTP_BUF_RESV_UF(m_abort, SCTP_MIN_OVERHEAD);
9016 m_out = m_abort;
9017 } else {
9018 /* Put AUTH chunk at the front of the chain */
9019 SCTP_BUF_NEXT(m_end) = m_abort;
9020 }
9021
9022 /* fill in the ABORT chunk */
9023 abort = mtod(m_abort, struct sctp_abort_chunk *);
9024 abort->ch.chunk_type = SCTP_ABORT_ASSOCIATION;
9025 abort->ch.chunk_flags = 0;
9026 abort->ch.chunk_length = htons(sizeof(*abort) + sz);
9027
9028 /* prepend and fill in the SCTP header */
9029 SCTP_BUF_PREPEND(m_out, sizeof(struct sctphdr), M_DONTWAIT);
9030 if (m_out == NULL) {
9031 /* TSNH: no memory */
9032 return;
9033 }
9034 shdr = mtod(m_out, struct sctphdr *);
9035 shdr->src_port = stcb->sctp_ep->sctp_lport;
9036 shdr->dest_port = stcb->rport;
9037 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
9038 shdr->checksum = 0;
9039 auth_offset += sizeof(struct sctphdr);
9040
9041 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
9042 stcb->asoc.primary_destination,
9043 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
9044 m_out, auth_offset, auth, 1, 0, NULL, 0);
9045 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9046}
9047
9048int
9049sctp_send_shutdown_complete(struct sctp_tcb *stcb,
9050 struct sctp_nets *net)
9051{
9052 /* formulate and SEND a SHUTDOWN-COMPLETE */
9053 struct mbuf *m_shutdown_comp;
9054 struct sctp_shutdown_complete_msg *comp_cp;
9055
9056 m_shutdown_comp = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_complete_msg), 0, M_DONTWAIT, 1, MT_HEADER);
9057 if (m_shutdown_comp == NULL) {
9058 /* no mbuf's */
9059 return (-1);
9060 }
9061 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
9062 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
9063 comp_cp->shut_cmp.ch.chunk_flags = 0;
9064 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
9065 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
9066 comp_cp->sh.dest_port = stcb->rport;
9067 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
9068 comp_cp->sh.checksum = 0;
9069
9070 SCTP_BUF_LEN(m_shutdown_comp) = sizeof(struct sctp_shutdown_complete_msg);
9071 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
9072 (struct sockaddr *)&net->ro._l_addr,
9073 m_shutdown_comp, 0, NULL, 1, 0, NULL, 0);
9074 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9075 return (0);
9076}
9077
9078int
9079sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh)
9080{
9081 /* formulate and SEND a SHUTDOWN-COMPLETE */
9082 struct mbuf *o_pak;
9083 struct mbuf *mout;
9084 struct ip *iph, *iph_out;
9085 struct ip6_hdr *ip6, *ip6_out;
9086 int offset_out, len;
9087 struct sctp_shutdown_complete_msg *comp_cp;
9088
9089 /* Get room for the largest message */
9090 len = (sizeof(struct ip6_hdr) + sizeof(struct sctp_shutdown_complete_msg));
9091
9092 o_pak = SCTP_GET_HEADER_FOR_OUTPUT(len);
9093 if (o_pak == NULL) {
9094 /* no mbuf's */
9095 return (-1);
9096 }
9097 mout = SCTP_HEADER_TO_CHAIN(o_pak);
9098 iph = mtod(m, struct ip *);
9099 iph_out = NULL;
9100 ip6_out = NULL;
9101 offset_out = 0;
9102 if (iph->ip_v == IPVERSION) {
9103 SCTP_BUF_LEN(mout) = sizeof(struct ip) +
9104 sizeof(struct sctp_shutdown_complete_msg);
9105 SCTP_BUF_NEXT(mout) = NULL;
9106 iph_out = mtod(mout, struct ip *);
9107
9108 /* Fill in the IP header for the ABORT */
9109 iph_out->ip_v = IPVERSION;
9110 iph_out->ip_hl = (sizeof(struct ip) / 4);
9111 iph_out->ip_tos = (u_char)0;
9112 iph_out->ip_id = 0;
9113 iph_out->ip_off = 0;
9114 iph_out->ip_ttl = MAXTTL;
9115 iph_out->ip_p = IPPROTO_SCTP;
9116 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
9117 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
9118
9119 /* let IP layer calculate this */
9120 iph_out->ip_sum = 0;
9121 offset_out += sizeof(*iph_out);
9122 comp_cp = (struct sctp_shutdown_complete_msg *)(
9123 (caddr_t)iph_out + offset_out);
9124 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
9125 ip6 = (struct ip6_hdr *)iph;
9126 SCTP_BUF_LEN(mout) = sizeof(struct ip6_hdr) +
9127 sizeof(struct sctp_shutdown_complete_msg);
9128 SCTP_BUF_NEXT(mout) = NULL;
9129 ip6_out = mtod(mout, struct ip6_hdr *);
9130
9131 /* Fill in the IPv6 header for the ABORT */
9132 ip6_out->ip6_flow = ip6->ip6_flow;
9133 ip6_out->ip6_hlim = ip6_defhlim;
9134 ip6_out->ip6_nxt = IPPROTO_SCTP;
9135 ip6_out->ip6_src = ip6->ip6_dst;
9136 ip6_out->ip6_dst = ip6->ip6_src;
9137 /*
9138 * ?? The old code had both the iph len + payload, I think
9139 * this is wrong and would never have worked
9140 */
9141 ip6_out->ip6_plen = sizeof(struct sctp_shutdown_complete_msg);
9142 offset_out += sizeof(*ip6_out);
9143 comp_cp = (struct sctp_shutdown_complete_msg *)(
9144 (caddr_t)ip6_out + offset_out);
9145 } else {
9146 /* Currently not supported. */
9147 return (-1);
9148 }
9149
9150 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout);
9151 /* Now copy in and fill in the ABORT tags etc. */
9152 comp_cp->sh.src_port = sh->dest_port;
9153 comp_cp->sh.dest_port = sh->src_port;
9154 comp_cp->sh.checksum = 0;
9155 comp_cp->sh.v_tag = sh->v_tag;
9156 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
9157 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
9158 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
9159
9160 /* add checksum */
9161 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(o_pak)) {
9162 comp_cp->sh.checksum = 0;
9163 } else {
9164 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
9165 }
9166 if (iph_out != NULL) {
9167 struct route ro;
9168
9169 bzero(&ro, sizeof ro);
9170 /* set IPv4 length */
9171 iph_out->ip_len = SCTP_HEADER_LEN(o_pak);
9172 /* out it goes */
9173 ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
9174 ,NULL
9175 );
9176 /* Free the route if we got one back */
9177 if (ro.ro_rt)
9178 RTFREE(ro.ro_rt);
9179 } else if (ip6_out != NULL) {
9180 struct route_in6 ro;
9181
9182 bzero(&ro, sizeof(ro));
9183 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
9184 ,NULL
9185 );
9186 /* Free the route if we got one back */
9187 if (ro.ro_rt)
9188 RTFREE(ro.ro_rt);
9189 }
9190 SCTP_STAT_INCR(sctps_sendpackets);
9191 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
9192 SCTP_STAT_INCR_COUNTER64(sctps_outcontrolchunks);
9193 return (0);
9194}
9195
/*
 * Pick the destination address most in need of a heartbeat: the reachable,
 * HB-enabled net that has gone longest without a send and whose idle time
 * has reached its RTO.  Unconfirmed (but not unreachable) addresses bypass
 * the RTO threshold so they are probed at a higher rate.  Stores the
 * current time in *now; updates the winner's last_sent_time.  Returns the
 * chosen net, or NULL if nothing needs a heartbeat.
 */
static struct sctp_nets *
sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
{
	struct sctp_nets *net, *hnet;
	int ms_goneby, highest_ms, state_overide = 0;

	SCTP_GETTIME_TIMEVAL(now);
	highest_ms = 0;
	hnet = NULL;
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (
		    ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
		    (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
		    ) {
			/*
			 * Skip this guy from consideration if HB is off AND
			 * its confirmed
			 */
			continue;
		}
		if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
			/* skip this dest net from consideration */
			continue;
		}
		if (net->last_sent_time.tv_sec) {
			/* Sent to so we subtract */
			ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
		} else
			/* Never been sent to */
			ms_goneby = 0x7fffffff;
		/*
		 * When the address state is unconfirmed but still
		 * considered reachable, we HB at a higher rate. Once it
		 * goes confirmed OR reaches the "unreachable" state, thenw
		 * we cut it back to HB at a more normal pace.
		 */
		if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
			state_overide = 1;
		} else {
			state_overide = 0;
		}

		/* Candidate if idle past its RTO (or unconfirmed) and the
		 * longest-idle seen so far. */
		if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
		    (ms_goneby > highest_ms)) {
			highest_ms = ms_goneby;
			hnet = net;
		}
	}
	if (hnet &&
	    ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED | SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
		state_overide = 1;
	} else {
		state_overide = 0;
	}

	/* highest_ms != 0 implies hnet != NULL (set together above). */
	if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
		/*
		 * Found the one with longest delay bounds OR it is
		 * unconfirmed and still not marked unreachable.
		 */
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
			printf("net:%p is the hb winner -",
			    hnet);
			if (hnet)
				sctp_print_address((struct sockaddr *)&hnet->ro._l_addr);
			else
				printf(" none\n");
		}
#endif
		/* update the timer now */
		hnet->last_sent_time = *now;
		return (hnet);
	}
	/* Nothing to HB */
	return (NULL);
}
9274
/*
 * Queue a HEARTBEAT REQUEST chunk.  If user_req is 0 the destination is
 * chosen by sctp_select_hb_destination(); otherwise u_net is used.  Also
 * runs threshold management, which may tear down the association.
 *
 * Returns 1 if a heartbeat was queued, 0 if there was nothing to do (or a
 * resource shortage), and -1 if threshold management declared the
 * association lost.
 */
int
sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
{
	struct sctp_tmit_chunk *chk;
	struct sctp_nets *net;
	struct sctp_heartbeat_chunk *hb;
	struct timeval now;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;

	SCTP_TCB_LOCK_ASSERT(stcb);
	if (user_req == 0) {
		net = sctp_select_hb_destination(stcb, &now);
		if (net == NULL) {
			/*
			 * All our busy none to send to, just start the
			 * timer again.
			 */
			if (stcb->asoc.state == 0) {
				return (0);
			}
			/* net is NULL here; the timer code tolerates that. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep,
			    stcb,
			    net);
			return (0);
		}
	} else {
		net = u_net;
		if (net == NULL) {
			return (0);
		}
		SCTP_GETTIME_TIMEVAL(&now);
	}
	/* Only INET and INET6 destinations are supported. */
	sin = (struct sockaddr_in *)&net->ro._l_addr;
	if (sin->sin_family != AF_INET) {
		if (sin->sin_family != AF_INET6) {
			/* huh */
			return (0);
		}
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
			printf("Gak, can't get a chunk for hb\n");
		}
#endif
		return (0);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_HEARTBEAT_REQUEST;
	chk->rec.chunk_id.can_take_data = 1;
	chk->asoc = &stcb->asoc;
	chk->send_size = sizeof(struct sctp_heartbeat_chunk);

	chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (0);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = net;
	atomic_add_int(&chk->whoTo->ref_count, 1);
	/* Now we have a mbuf that we can fill in with the details */
	hb = mtod(chk->data, struct sctp_heartbeat_chunk *);

	/* fill out chunk header */
	hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
	hb->ch.chunk_flags = 0;
	hb->ch.chunk_length = htons(chk->send_size);
	/* Fill out hb parameter */
	hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
	hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
	/* Timestamp is echoed back by the peer for RTT measurement. */
	hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
	hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
	/* Did our user request this one, put it in */
	hb->heartbeat.hb_info.user_req = user_req;
	hb->heartbeat.hb_info.addr_family = sin->sin_family;
	hb->heartbeat.hb_info.addr_len = sin->sin_len;
	if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
		/*
		 * we only take from the entropy pool if the address is not
		 * confirmed.
		 */
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
	} else {
		net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
		net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
	}
	if (sin->sin_family == AF_INET) {
		memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
	} else if (sin->sin_family == AF_INET6) {
		/* We leave the scope the way it is in our lookup table. */
		sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
		memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
	} else {
		/*
		 * huh compiler bug
		 *
		 * NOTE(review): unreachable given the family check above,
		 * but if ever reached it would leak chk (data mbuf and the
		 * whoTo reference) — worth cleaning up if this branch is
		 * ever made live.
		 */
		return (0);
	}
	/* ok we have a destination that needs a beat */
	/* lets do the theshold management Qiaobing style */

	if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
	    stcb->asoc.max_send_times)) {
		/*
		 * we have lost the association, in a way this is quite bad
		 * since we really are one less time since we really did not
		 * send yet. This is the down side to the Q's style as
		 * defined in the RFC and not my alternate style defined in
		 * the RFC.
		 */
		atomic_subtract_int(&chk->whoTo->ref_count, 1);
		if (chk->data != NULL) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk);
		return (-1);
	}
	net->hb_responded = 0;
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	SCTP_STAT_INCR(sctps_sendheartbeat);
	/*
	 * Call directly med level routine to put out the chunk. It will
	 * always tumble out control chunks aka HB but it may even tumble
	 * out data too.
	 */
	return (1);
}
9410
9411void
9412sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
9413 uint32_t high_tsn)
9414{
9415 struct sctp_association *asoc;
9416 struct sctp_ecne_chunk *ecne;
9417 struct sctp_tmit_chunk *chk;
9418
9419 asoc = &stcb->asoc;
9420 SCTP_TCB_LOCK_ASSERT(stcb);
9421 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9422 if (chk->rec.chunk_id.id == SCTP_ECN_ECHO) {
9423 /* found a previous ECN_ECHO update it if needed */
9424 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
9425 ecne->tsn = htonl(high_tsn);
9426 return;
9427 }
9428 }
9429 /* nope could not find one to update so we must build one */
9430 sctp_alloc_a_chunk(stcb, chk);
9431 if (chk == NULL) {
9432 return;
9433 }
9434 chk->copy_by_ref = 0;
9435 SCTP_STAT_INCR(sctps_sendecne);
9436 chk->rec.chunk_id.id = SCTP_ECN_ECHO;
9437 chk->rec.chunk_id.can_take_data = 0;
9438 chk->asoc = &stcb->asoc;
9439 chk->send_size = sizeof(struct sctp_ecne_chunk);
9440 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
9441 if (chk->data == NULL) {
9442 sctp_free_a_chunk(stcb, chk);
9443 return;
9444 }
9445 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
9446 SCTP_BUF_LEN(chk->data) = chk->send_size;
9447 chk->sent = SCTP_DATAGRAM_UNSENT;
9448 chk->snd_count = 0;
9449 chk->whoTo = net;
9450 atomic_add_int(&chk->whoTo->ref_count, 1);
9451 stcb->asoc.ecn_echo_cnt_onq++;
9452 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
9453 ecne->ch.chunk_type = SCTP_ECN_ECHO;
9454 ecne->ch.chunk_flags = 0;
9455 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
9456 ecne->tsn = htonl(high_tsn);
9457 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9458 asoc->ctrl_queue_cnt++;
9459}
9460
9461void
9462sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
9463 struct mbuf *m, int iphlen, int bad_crc)
9464{
9465 struct sctp_association *asoc;
9466 struct sctp_pktdrop_chunk *drp;
9467 struct sctp_tmit_chunk *chk;
9468 uint8_t *datap;
9469 int len;
9470 unsigned int small_one;
9471 struct ip *iph;
9472
9473 long spc;
9474
9475 asoc = &stcb->asoc;
9476 SCTP_TCB_LOCK_ASSERT(stcb);
9477 if (asoc->peer_supports_pktdrop == 0) {
9478 /*
9479 * peer must declare support before I send one.
9480 */
9481 return;
9482 }
9483 if (stcb->sctp_socket == NULL) {
9484 return;
9485 }
9486 sctp_alloc_a_chunk(stcb, chk);
9487 if (chk == NULL) {
9488 return;
9489 }
9490 chk->copy_by_ref = 0;
9491 iph = mtod(m, struct ip *);
9492 if (iph == NULL) {
9493 return;
9494 }
9495 if (iph->ip_v == IPVERSION) {
9496 /* IPv4 */
9497 len = chk->send_size = iph->ip_len;
9498 } else {
9499 struct ip6_hdr *ip6h;
9500
9501 /* IPv6 */
9502 ip6h = mtod(m, struct ip6_hdr *);
9503 len = chk->send_size = htons(ip6h->ip6_plen);
9504 }
9505 chk->asoc = &stcb->asoc;
9506 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
9507 if (chk->data == NULL) {
9508jump_out:
9509 sctp_free_a_chunk(stcb, chk);
9510 return;
9511 }
9512 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
9513 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
9514 if (drp == NULL) {
9515 sctp_m_freem(chk->data);
9516 chk->data = NULL;
9517 goto jump_out;
9518 }
9519 small_one = asoc->smallest_mtu;
9520 if (small_one > MCLBYTES) {
9521 /* Only one cluster worth of data MAX */
9522 small_one = MCLBYTES;
9523 }
9524 chk->book_size = SCTP_SIZE32((chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
9525 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD));
9526 chk->book_size_scale = 0;
9527 if (chk->book_size > small_one) {
9528 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
9529 drp->trunc_len = htons(chk->send_size);
9530 chk->send_size = small_one - (SCTP_MED_OVERHEAD +
9531 sizeof(struct sctp_pktdrop_chunk) +
9532 sizeof(struct sctphdr));
9533 len = chk->send_size;
9534 } else {
9535 /* no truncation needed */
9536 drp->ch.chunk_flags = 0;
9537 drp->trunc_len = htons(0);
9538 }
9539 if (bad_crc) {
9540 drp->ch.chunk_flags |= SCTP_BADCRC;
9541 }
9542 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
9543 SCTP_BUF_LEN(chk->data) = chk->send_size;
9544 chk->sent = SCTP_DATAGRAM_UNSENT;
9545 chk->snd_count = 0;
9546 if (net) {
9547 /* we should hit here */
9548 chk->whoTo = net;
9549 } else {
9550 chk->whoTo = asoc->primary_destination;
9551 }
9552 atomic_add_int(&chk->whoTo->ref_count, 1);
9553 chk->rec.chunk_id.id = SCTP_PACKET_DROPPED;
9554 chk->rec.chunk_id.can_take_data = 1;
9555 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
9556 drp->ch.chunk_length = htons(chk->send_size);
9557 spc = stcb->sctp_socket->so_rcv.sb_hiwat;
9558 if (spc < 0) {
9559 spc = 0;
9560 }
9561 drp->bottle_bw = htonl(spc);
9562 if (asoc->my_rwnd) {
9563 drp->current_onq = htonl(asoc->size_on_reasm_queue +
9564 asoc->size_on_all_streams +
9565 asoc->my_rwnd_control_len +
9566 stcb->sctp_socket->so_rcv.sb_cc);
9567 } else {
9568 /*
9569 * If my rwnd is 0, possibly from mbuf depletion as well as
9570 * space used, tell the peer there is NO space aka onq == bw
9571 */
9572 drp->current_onq = htonl(spc);
9573 }
9574 drp->reserved = 0;
9575 datap = drp->data;
9576 m_copydata(m, iphlen, len, (caddr_t)datap);
9577 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9578 asoc->ctrl_queue_cnt++;
9579}
9580
9581void
9582sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
9583{
9584 struct sctp_association *asoc;
9585 struct sctp_cwr_chunk *cwr;
9586 struct sctp_tmit_chunk *chk;
9587
9588 asoc = &stcb->asoc;
9589 SCTP_TCB_LOCK_ASSERT(stcb);
9590 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
9591 if (chk->rec.chunk_id.id == SCTP_ECN_CWR) {
9592 /* found a previous ECN_CWR update it if needed */
9593 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
9594 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
9595 MAX_TSN)) {
9596 cwr->tsn = htonl(high_tsn);
9597 }
9598 return;
9599 }
9600 }
9601 /* nope could not find one to update so we must build one */
9602 sctp_alloc_a_chunk(stcb, chk);
9603 if (chk == NULL) {
9604 return;
9605 }
9606 chk->copy_by_ref = 0;
9607 chk->rec.chunk_id.id = SCTP_ECN_CWR;
9608 chk->rec.chunk_id.can_take_data = 1;
9609 chk->asoc = &stcb->asoc;
9610 chk->send_size = sizeof(struct sctp_cwr_chunk);
9611 chk->data = sctp_get_mbuf_for_msg(chk->send_size, 0, M_DONTWAIT, 1, MT_HEADER);
9612 if (chk->data == NULL) {
9613 sctp_free_a_chunk(stcb, chk);
9614 return;
9615 }
9616 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);
9617 SCTP_BUF_LEN(chk->data) = chk->send_size;
9618 chk->sent = SCTP_DATAGRAM_UNSENT;
9619 chk->snd_count = 0;
9620 chk->whoTo = net;
9621 atomic_add_int(&chk->whoTo->ref_count, 1);
9622 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
9623 cwr->ch.chunk_type = SCTP_ECN_CWR;
9624 cwr->ch.chunk_flags = 0;
9625 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
9626 cwr->tsn = htonl(high_tsn);
9627 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
9628 asoc->ctrl_queue_cnt++;
9629}
9630
/*
 * Append an outgoing-stream-reset request parameter to the stream-reset
 * chunk being built in chk->data.  "list" holds number_entries stream ids
 * in host order (an empty list requests a reset of all streams); seq is
 * our request sequence number, resp_seq echoes the peer's sequence, and
 * last_sent is the last TSN we assigned.  The chunk header length and the
 * chk book/send sizes are updated to cover the new parameter.
 */
void
sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq, uint32_t resp_seq, uint32_t last_sent)
{
	int len, old_len, i;
	struct sctp_stream_reset_out_request *req_out;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	/* current padded chunk length == offset of the new parameter */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_out = (struct sctp_stream_reset_out_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_out_request) + (sizeof(uint16_t) * number_entries));
	req_out->ph.param_type = htons(SCTP_STR_RESET_OUT_REQUEST);
	req_out->ph.param_length = htons(len);
	req_out->request_seq = htonl(seq);
	req_out->response_seq = htonl(resp_seq);
	req_out->send_reset_at_tsn = htonl(last_sent);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_out->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_out->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
9675
9676
/*
 * Append an incoming-stream-reset request parameter to the stream-reset
 * chunk being built in chk->data.  "list" holds number_entries stream ids
 * in host order (empty == all streams) and seq is our request sequence
 * number.  The chunk header length and the chk book/send sizes are
 * updated to cover the new parameter.
 */
void
sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
    int number_entries, uint16_t * list,
    uint32_t seq)
{
	int len, old_len, i;
	struct sctp_stream_reset_in_request *req_in;
	struct sctp_chunkhdr *ch;

	ch = mtod(chk->data, struct sctp_chunkhdr *);


	/* current padded chunk length == offset of the new parameter */
	old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));

	/* get to new offset for the param. */
	req_in = (struct sctp_stream_reset_in_request *)((caddr_t)ch + len);
	/* now how long will this param be? */
	len = (sizeof(struct sctp_stream_reset_in_request) + (sizeof(uint16_t) * number_entries));
	req_in->ph.param_type = htons(SCTP_STR_RESET_IN_REQUEST);
	req_in->ph.param_length = htons(len);
	req_in->request_seq = htonl(seq);
	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			req_in->list_of_streams[i] = htons(list[i]);
		}
	}
	if (SCTP_SIZE32(len) > len) {
		/*
		 * Need to worry about the pad we may end up adding to the
		 * end. This is easy since the struct is either aligned to 4
		 * bytes or 2 bytes off.
		 */
		req_in->list_of_streams[number_entries] = 0;
	}
	/* now fix the chunk length */
	ch->chunk_length = htons(len + old_len);
	chk->book_size = len + old_len;
	chk->book_size_scale = 0;
	chk->send_size = SCTP_SIZE32(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;
	return;
}
9719
9720
9721void
9722sctp_add_stream_reset_tsn(struct sctp_tmit_chunk *chk,
9723 uint32_t seq)
9724{
9725 int len, old_len;
9726 struct sctp_stream_reset_tsn_request *req_tsn;
9727 struct sctp_chunkhdr *ch;
9728
9729 ch = mtod(chk->data, struct sctp_chunkhdr *);
9730
9731
9732 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
9733
9734 /* get to new offset for the param. */
9735 req_tsn = (struct sctp_stream_reset_tsn_request *)((caddr_t)ch + len);
9736 /* now how long will this param be? */
9737 len = sizeof(struct sctp_stream_reset_tsn_request);
9738 req_tsn->ph.param_type = htons(SCTP_STR_RESET_TSN_REQUEST);
9739 req_tsn->ph.param_length = htons(len);
9740 req_tsn->request_seq = htonl(seq);
9741
9742 /* now fix the chunk length */
9743 ch->chunk_length = htons(len + old_len);
9744 chk->send_size = len + old_len;
9745 chk->book_size = SCTP_SIZE32(chk->send_size);
9746 chk->book_size_scale = 0;
9747 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
9748 return;
9749}
9750
9751void
9752sctp_add_stream_reset_result(struct sctp_tmit_chunk *chk,
9753 uint32_t resp_seq, uint32_t result)
9754{
9755 int len, old_len;
9756 struct sctp_stream_reset_response *resp;
9757 struct sctp_chunkhdr *ch;
9758
9759 ch = mtod(chk->data, struct sctp_chunkhdr *);
9760
9761
9762 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
9763
9764 /* get to new offset for the param. */
9765 resp = (struct sctp_stream_reset_response *)((caddr_t)ch + len);
9766 /* now how long will this param be? */
9767 len = sizeof(struct sctp_stream_reset_response);
9768 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
9769 resp->ph.param_length = htons(len);
9770 resp->response_seq = htonl(resp_seq);
9771 resp->result = ntohl(result);
9772
9773 /* now fix the chunk length */
9774 ch->chunk_length = htons(len + old_len);
9775 chk->book_size = len + old_len;
9776 chk->book_size_scale = 0;
9777 chk->send_size = SCTP_SIZE32(chk->book_size);
9778 SCTP_BUF_LEN(chk->data) = chk->send_size;
9779 return;
9780
9781}
9782
9783
9784void
9785sctp_add_stream_reset_result_tsn(struct sctp_tmit_chunk *chk,
9786 uint32_t resp_seq, uint32_t result,
9787 uint32_t send_una, uint32_t recv_next)
9788{
9789 int len, old_len;
9790 struct sctp_stream_reset_response_tsn *resp;
9791 struct sctp_chunkhdr *ch;
9792
9793 ch = mtod(chk->data, struct sctp_chunkhdr *);
9794
9795
9796 old_len = len = SCTP_SIZE32(ntohs(ch->chunk_length));
9797
9798 /* get to new offset for the param. */
9799 resp = (struct sctp_stream_reset_response_tsn *)((caddr_t)ch + len);
9800 /* now how long will this param be? */
9801 len = sizeof(struct sctp_stream_reset_response_tsn);
9802 resp->ph.param_type = htons(SCTP_STR_RESET_RESPONSE);
9803 resp->ph.param_length = htons(len);
9804 resp->response_seq = htonl(resp_seq);
9805 resp->result = htonl(result);
9806 resp->senders_next_tsn = htonl(send_una);
9807 resp->receivers_next_tsn = htonl(recv_next);
9808
9809 /* now fix the chunk length */
9810 ch->chunk_length = htons(len + old_len);
9811 chk->book_size = len + old_len;
9812 chk->send_size = SCTP_SIZE32(chk->book_size);
9813 chk->book_size_scale = 0;
9814 SCTP_BUF_LEN(chk->data) = chk->send_size;
9815 return;
9816}
9817
9818
/*
 * Build and queue a STREAM-RESET request chunk for the association.
 * Depending on the flags, the chunk carries an outgoing-stream reset
 * request (send_out_req), an incoming-stream reset request (send_in_req),
 * and/or a TSN reset request (send_tsn_req); a TSN request cannot be
 * combined with the other two.  Returns 0 on success, or EBUSY if a
 * request is already outstanding, EINVAL for a bad flag combination, or
 * ENOMEM on allocation failure.  Starts the STRRESET timer on success.
 */
int
sctp_send_str_reset_req(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list,
    uint8_t send_out_req, uint32_t resp_seq,
    uint8_t send_in_req,
    uint8_t send_tsn_req)
{

	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	uint32_t seq;

	asoc = &stcb->asoc;
	if (asoc->stream_reset_outstanding) {
		/*
		 * Already one pending, must get ACK back to clear the flag.
		 */
		return (EBUSY);
	}
	if ((send_out_req == 0) && (send_in_req == 0) && (send_tsn_req == 0)) {
		/* nothing to do */
		return (EINVAL);
	}
	if (send_tsn_req && (send_out_req || send_in_req)) {
		/* error, can't do that */
		return (EINVAL);
	}
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ENOMEM);
	}
	chk->copy_by_ref = 0;
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	/* start with a bare chunk header; params are appended below */
	chk->book_size = sizeof(struct sctp_chunkhdr);
	chk->send_size = SCTP_SIZE32(chk->book_size);
	chk->book_size_scale = 0;

	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		sctp_free_a_chunk(stcb, chk);
		return (ENOMEM);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = asoc->primary_destination;
	atomic_add_int(&chk->whoTo->ref_count, 1);

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->book_size);
	SCTP_BUF_LEN(chk->data) = chk->send_size;

	/* append the requested parameters; each helper grows the chunk */
	seq = stcb->asoc.str_reset_seq_out;
	if (send_out_req) {
		sctp_add_stream_reset_out(chk, number_entries, list,
		    seq, resp_seq, (stcb->asoc.sending_seq - 1));
		asoc->stream_reset_out_is_outstanding = 1;
		seq++;
		asoc->stream_reset_outstanding++;
	}
	if (send_in_req) {
		sctp_add_stream_reset_in(chk, number_entries, list, seq);
		asoc->stream_reset_outstanding++;
	}
	if (send_tsn_req) {
		sctp_add_stream_reset_tsn(chk, seq);
		asoc->stream_reset_outstanding++;
	}
	/* remember the chunk so the ACK/timeout path can find it */
	asoc->str_reset = chk;

	/* insert the chunk for sending */
	TAILQ_INSERT_TAIL(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	asoc->ctrl_queue_cnt++;
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
	return (0);
}
9904
9905void
9906sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
9907 struct mbuf *err_cause)
9908{
9909 /*
9910 * Formulate the abort message, and send it back down.
9911 */
9912 struct mbuf *o_pak;
9913 struct mbuf *mout;
9914 struct sctp_abort_msg *abm;
9915 struct ip *iph, *iph_out;
9916 struct ip6_hdr *ip6, *ip6_out;
9917 int iphlen_out;
9918
9919 /* don't respond to ABORT with ABORT */
9920 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
9921 if (err_cause)
9922 sctp_m_freem(err_cause);
9923 return;
9924 }
9925 o_pak = SCTP_GET_HEADER_FOR_OUTPUT((sizeof(struct ip6_hdr) + sizeof(struct sctp_abort_msg)));
9926 if (o_pak == NULL) {
9927 if (err_cause)
9928 sctp_m_freem(err_cause);
9929 return;
9930 }
9931 mout = SCTP_HEADER_TO_CHAIN(o_pak);
9932 iph = mtod(m, struct ip *);
9933 iph_out = NULL;
9934 ip6_out = NULL;
9935 if (iph->ip_v == IPVERSION) {
9936 iph_out = mtod(mout, struct ip *);
9937 SCTP_BUF_LEN(mout) = sizeof(*iph_out) + sizeof(*abm);
9938 SCTP_BUF_NEXT(mout) = err_cause;
9939
9940 /* Fill in the IP header for the ABORT */
9941 iph_out->ip_v = IPVERSION;
9942 iph_out->ip_hl = (sizeof(struct ip) / 4);
9943 iph_out->ip_tos = (u_char)0;
9944 iph_out->ip_id = 0;
9945 iph_out->ip_off = 0;
9946 iph_out->ip_ttl = MAXTTL;
9947 iph_out->ip_p = IPPROTO_SCTP;
9948 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
9949 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
9950 /* let IP layer calculate this */
9951 iph_out->ip_sum = 0;
9952
9953 iphlen_out = sizeof(*iph_out);
9954 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
9955 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
9956 ip6 = (struct ip6_hdr *)iph;
9957 ip6_out = mtod(mout, struct ip6_hdr *);
9958 SCTP_BUF_LEN(mout) = sizeof(*ip6_out) + sizeof(*abm);
9959 SCTP_BUF_NEXT(mout) = err_cause;
9960
9961 /* Fill in the IP6 header for the ABORT */
9962 ip6_out->ip6_flow = ip6->ip6_flow;
9963 ip6_out->ip6_hlim = ip6_defhlim;
9964 ip6_out->ip6_nxt = IPPROTO_SCTP;
9965 ip6_out->ip6_src = ip6->ip6_dst;
9966 ip6_out->ip6_dst = ip6->ip6_src;
9967
9968 iphlen_out = sizeof(*ip6_out);
9969 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
9970 } else {
9971 /* Currently not supported */
9972 return;
9973 }
9974
9975 abm->sh.src_port = sh->dest_port;
9976 abm->sh.dest_port = sh->src_port;
9977 abm->sh.checksum = 0;
9978 if (vtag == 0) {
9979 abm->sh.v_tag = sh->v_tag;
9980 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
9981 } else {
9982 abm->sh.v_tag = htonl(vtag);
9983 abm->msg.ch.chunk_flags = 0;
9984 }
9985 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
9986
9987 if (err_cause) {
9988 struct mbuf *m_tmp = err_cause;
9989 int err_len = 0;
9990
9991 /* get length of the err_cause chain */
9992 while (m_tmp != NULL) {
9993 err_len += SCTP_BUF_LEN(m_tmp);
9994 m_tmp = SCTP_BUF_NEXT(m_tmp);
9995 }
9996 SCTP_HEADER_LEN(o_pak) = SCTP_BUF_LEN(mout) + err_len;
9997 if (err_len % 4) {
9998 /* need pad at end of chunk */
9999 uint32_t cpthis = 0;
10000 int padlen;
10001
10002 padlen = 4 - (SCTP_HEADER_LEN(o_pak) % 4);
10003 m_copyback(mout, SCTP_HEADER_LEN(o_pak), padlen, (caddr_t)&cpthis);
10004 SCTP_HEADER_LEN(o_pak) += padlen;
10005 }
10006 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
10007 } else {
10008 SCTP_HEADER_LEN(mout) = SCTP_BUF_LEN(mout);
10009 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
10010 }
10011
10012 /* add checksum */
10013 if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
10014 abm->sh.checksum = 0;
10015 } else {
10016 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
10017 }
10018 if (iph_out != NULL) {
10019 struct route ro;
10020
10021 /* zap the stack pointer to the route */
10022 bzero(&ro, sizeof ro);
10023#ifdef SCTP_DEBUG
10024 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
10025 printf("sctp_send_abort calling ip_output:\n");
10026 sctp_print_address_pkt(iph_out, &abm->sh);
10027 }
10028#endif
10029 /* set IPv4 length */
10030 iph_out->ip_len = SCTP_HEADER_LEN(o_pak);
10031 /* out it goes */
10032 (void)ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
10033 ,NULL
10034 );
10035 /* Free the route if we got one back */
10036 if (ro.ro_rt)
10037 RTFREE(ro.ro_rt);
10038 } else if (ip6_out != NULL) {
10039 struct route_in6 ro;
10040
10041 /* zap the stack pointer to the route */
10042 bzero(&ro, sizeof(ro));
10043#ifdef SCTP_DEBUG
10044 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
10045 printf("sctp_send_abort calling ip6_output:\n");
10046 sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh);
10047 }
10048#endif
10049 ip6_out->ip6_plen = SCTP_HEADER_LEN(o_pak) - sizeof(*ip6_out);
10050 ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
10051 ,NULL
10052 );
10053 /* Free the route if we got one back */
10054 if (ro.ro_rt)
10055 RTFREE(ro.ro_rt);
10056 }
10057 SCTP_STAT_INCR(sctps_sendpackets);
10058 SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
10059}
10060
/*
 * Send an OPERATION-ERROR packet back to the source of the packet in m.
 * scm is an mbuf chain holding the error cause(s); it is consumed here
 * (prepended with SCTP + chunk headers and attached to the outgoing
 * packet, or freed on failure).  vtag is used verbatim as the outgoing
 * verification tag.
 */
void
sctp_send_operr_to(struct mbuf *m, int iphlen,
    struct mbuf *scm,
    uint32_t vtag)
{
	struct mbuf *o_pak;
	struct sctphdr *ihdr;
	int retcode;
	struct sctphdr *ohdr;
	struct sctp_chunkhdr *ophdr;

	struct ip *iph;

#ifdef SCTP_DEBUG
	struct sockaddr_in6 lsa6, fsa6;

#endif
	uint32_t val;
	struct mbuf *at;
	int len;

	iph = mtod(m, struct ip *);
	ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);

	/* make room in front of scm for the SCTP + chunk headers */
	SCTP_BUF_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT);
	if (scm == NULL) {
		/* can't send because we can't add a mbuf */
		return;
	}
	/* common header: swap ports, echo the caller-supplied vtag */
	ohdr = mtod(scm, struct sctphdr *);
	ohdr->src_port = ihdr->dest_port;
	ohdr->dest_port = ihdr->src_port;
	ohdr->v_tag = vtag;
	ohdr->checksum = 0;
	ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
	ophdr->chunk_type = SCTP_OPERATION_ERROR;
	ophdr->chunk_flags = 0;
	/* total the chain to size the chunk */
	len = 0;
	at = scm;
	while (at) {
		len += SCTP_BUF_LEN(at);
		at = SCTP_BUF_NEXT(at);
	}

	ophdr->chunk_length = htons(len - sizeof(struct sctphdr));
	if (len % 4) {
		/* need padding */
		uint32_t cpthis = 0;
		int padlen;

		padlen = 4 - (len % 4);
		m_copyback(scm, len, padlen, (caddr_t)&cpthis);
		len += padlen;
	}
	if ((sctp_no_csum_on_loopback) && SCTP_IS_IT_LOOPBACK(m)) {
		val = 0;
	} else {
		val = sctp_calculate_sum(scm, NULL, 0);
	}
	ohdr->checksum = val;
	if (iph->ip_v == IPVERSION) {
		/* V4 */
		struct ip *out;
		struct route ro;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
		len += sizeof(struct ip);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);
		bzero(&ro, sizeof ro);
		/* build the IPv4 header, reversing src/dst of the input */
		out = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip *);
		out->ip_v = iph->ip_v;
		out->ip_hl = (sizeof(struct ip) / 4);
		out->ip_tos = iph->ip_tos;
		out->ip_id = iph->ip_id;
		out->ip_off = 0;
		out->ip_ttl = MAXTTL;
		out->ip_p = IPPROTO_SCTP;
		out->ip_sum = 0;
		out->ip_src = iph->ip_dst;
		out->ip_dst = iph->ip_src;
		out->ip_len = SCTP_HEADER_LEN(o_pak);
		/* NOTE(review): retcode is captured but never examined */
		retcode = ip_output(o_pak, 0, &ro, IP_RAWOUTPUT, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	} else {
		/* V6 */
		struct route_in6 ro;
		struct ip6_hdr *out6, *in6;

		o_pak = SCTP_GET_HEADER_FOR_OUTPUT(sizeof(struct ip6_hdr));
		if (o_pak == NULL) {
			sctp_m_freem(scm);
			return;
		}
		SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
		len += sizeof(struct ip6_hdr);
		SCTP_ATTACH_CHAIN(o_pak, scm, len);

		bzero(&ro, sizeof ro);
		/* build the IPv6 header, reversing src/dst of the input */
		in6 = mtod(m, struct ip6_hdr *);
		out6 = mtod(SCTP_HEADER_TO_CHAIN(o_pak), struct ip6_hdr *);
		out6->ip6_flow = in6->ip6_flow;
		out6->ip6_hlim = ip6_defhlim;
		out6->ip6_nxt = IPPROTO_SCTP;
		out6->ip6_src = in6->ip6_dst;
		out6->ip6_dst = in6->ip6_src;
		out6->ip6_plen = len - sizeof(struct ip6_hdr);
#ifdef SCTP_DEBUG
		bzero(&lsa6, sizeof(lsa6));
		lsa6.sin6_len = sizeof(lsa6);
		lsa6.sin6_family = AF_INET6;
		lsa6.sin6_addr = out6->ip6_src;
		bzero(&fsa6, sizeof(fsa6));
		fsa6.sin6_len = sizeof(fsa6);
		fsa6.sin6_family = AF_INET6;
		fsa6.sin6_addr = out6->ip6_dst;
		if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
			printf("sctp_operr_to calling ipv6 output:\n");
			printf("src: ");
			sctp_print_address((struct sockaddr *)&lsa6);
			printf("dst ");
			sctp_print_address((struct sockaddr *)&fsa6);
		}
#endif				/* SCTP_DEBUG */
		ip6_output(o_pak, NULL, &ro, 0, NULL, NULL
		    ,NULL
		    );
		SCTP_STAT_INCR(sctps_sendpackets);
		SCTP_STAT_INCR_COUNTER64(sctps_outpackets);
		/* Free the route if we got one back */
		if (ro.ro_rt)
			RTFREE(ro.ro_rt);
	}
}
10205
10206
10207
10208static struct mbuf *
10209sctp_copy_resume(struct sctp_stream_queue_pending *sp,
10210 struct uio *uio,
10211 struct sctp_sndrcvinfo *srcv,
10212 int max_send_len,
10213 int user_marks_eor,
10214 int *error,
10215 uint32_t * sndout,
10216 struct mbuf **new_tail)
10217{
10218 int left, cancpy, willcpy;
10219 struct mbuf *m, *prev, *head;
10220
10221 left = min(uio->uio_resid, max_send_len);
10222 /* Always get a header just in case */
10223 head = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
10224 cancpy = M_TRAILINGSPACE(head);
10225 willcpy = min(cancpy, left);
10226 *error = uiomove(mtod(head, caddr_t), willcpy, uio);
10227 if (*error) {
10228 sctp_m_freem(head);
10229 return (NULL);
10230 }
10231 *sndout += willcpy;
10232 left -= willcpy;
10233 SCTP_BUF_LEN(head) = willcpy;
10234 m = head;
10235 *new_tail = head;
10236 while (left > 0) {
10237 /* move in user data */
10238 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
10239 if (SCTP_BUF_NEXT(m) == NULL) {
10240 sctp_m_freem(head);
10241 *new_tail = NULL;
10242 *error = ENOMEM;
10243 return (NULL);
10244 }
10245 prev = m;
10246 m = SCTP_BUF_NEXT(m);
10247 cancpy = M_TRAILINGSPACE(m);
10248 willcpy = min(cancpy, left);
10249 *error = uiomove(mtod(m, caddr_t), willcpy, uio);
10250 if (*error) {
10251 sctp_m_freem(head);
10252 *new_tail = NULL;
10253 *error = EFAULT;
10254 return (NULL);
10255 }
10256 SCTP_BUF_LEN(m) = willcpy;
10257 left -= willcpy;
10258 *sndout += willcpy;
10259 *new_tail = m;
10260 if (left == 0) {
10261 SCTP_BUF_NEXT(m) = NULL;
10262 }
10263 }
10264 return (head);
10265}
10266
10267static int
10268sctp_copy_one(struct sctp_stream_queue_pending *sp,
10269 struct uio *uio,
10270 int resv_upfront)
10271{
10272 int left, cancpy, willcpy, error;
10273 struct mbuf *m, *head;
10274 int cpsz = 0;
10275
10276 /* First one gets a header */
10277 left = sp->length;
10278 head = m = sctp_get_mbuf_for_msg((left + resv_upfront), 0, M_WAIT, 0, MT_DATA);
10279 if (m == NULL) {
10280 return (ENOMEM);
10281 }
10282 /*
10283 * Add this one for m in now, that way if the alloc fails we won't
10284 * have a bad cnt.
10285 */
10286 SCTP_BUF_RESV_UF(m, resv_upfront);
10287 cancpy = M_TRAILINGSPACE(m);
10288 willcpy = min(cancpy, left);
10289 while (left > 0) {
10290 /* move in user data */
10291 error = uiomove(mtod(m, caddr_t), willcpy, uio);
10292 if (error) {
10293 sctp_m_freem(head);
10294 return (error);
10295 }
10296 SCTP_BUF_LEN(m) = willcpy;
10297 left -= willcpy;
10298 cpsz += willcpy;
10299 if (left > 0) {
10300 SCTP_BUF_NEXT(m) = sctp_get_mbuf_for_msg(left, 0, M_WAIT, 0, MT_DATA);
10301 if (SCTP_BUF_NEXT(m) == NULL) {
10302 /*
10303 * the head goes back to caller, he can free
10304 * the rest
10305 */
10306 sctp_m_freem(head);
10307 return (ENOMEM);
10308 }
10309 m = SCTP_BUF_NEXT(m);
10310 cancpy = M_TRAILINGSPACE(m);
10311 willcpy = min(cancpy, left);
10312 } else {
10313 sp->tail_mbuf = m;
10314 SCTP_BUF_NEXT(m) = NULL;
10315 }
10316 }
10317 sp->data = head;
10318 sp->length = cpsz;
10319 return (0);
10320}
10321
10322
10323
/*
 * Build a stream-queue-pending entry from user data in uio: allocate the
 * sp structure, fill it from srcv (flags, TTL, ppid, context, stream),
 * copy up to max_send_len bytes of data into it, and pick the destination
 * net.  Returns the entry on success; returns NULL with *errno set on
 * failure (EMSGSIZE, ECONNRESET, ENOMEM, or a uiomove error).
 */
static struct sctp_stream_queue_pending *
sctp_copy_it_in(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_sndrcvinfo *srcv,
    struct uio *uio,
    struct sctp_nets *net,
    int max_send_len,
    int user_marks_eor,
    int *errno,
    int non_blocking)
{
	/*
	 * This routine must be very careful in its work. Protocol
	 * processing is up and running so care must be taken to spl...()
	 * when you need to do something that may effect the stcb/asoc. The
	 * sb is locked however. When data is copied the protocol processing
	 * should be enabled since this is a slower operation...
	 */
	struct sctp_stream_queue_pending *sp = NULL;
	int resv_in_first;

	*errno = 0;
	/*
	 * Unless E_EOR mode is on, we must make a send FIT in one call.
	 */
	if (((user_marks_eor == 0) && non_blocking) &&
	    (uio->uio_resid > stcb->sctp_socket->so_snd.sb_hiwat)) {
		/* It will NEVER fit */
		*errno = EMSGSIZE;
		goto out_now;
	}
	/* Now can we send this? */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
	    (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
		/* got data while shutting down */
		*errno = ECONNRESET;
		goto out_now;
	}
	sp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_strmoq, struct sctp_stream_queue_pending);
	if (sp == NULL) {
		*errno = ENOMEM;
		goto out_now;
	}
	SCTP_INCR_STRMOQ_COUNT();
	/* copy the send parameters over from the caller's sndrcvinfo */
	sp->act_flags = 0;
	sp->sinfo_flags = srcv->sinfo_flags;
	sp->timetolive = srcv->sinfo_timetolive;
	sp->ppid = srcv->sinfo_ppid;
	sp->context = srcv->sinfo_context;
	sp->strseq = 0;
	SCTP_GETTIME_TIMEVAL(&sp->ts);

	sp->stream = srcv->sinfo_stream;
	sp->length = min(uio->uio_resid, max_send_len);
	/* complete only if the whole user message fits in this copy */
	if ((sp->length == uio->uio_resid) &&
	    ((user_marks_eor == 0) ||
	    (srcv->sinfo_flags & SCTP_EOF) ||
	    (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
	    ) {
		sp->msg_is_complete = 1;
	} else {
		sp->msg_is_complete = 0;
	}
	sp->some_taken = 0;
	/* reserve room in the first mbuf for the DATA chunk header */
	resv_in_first = sizeof(struct sctp_data_chunk);
	sp->data = sp->tail_mbuf = NULL;
	*errno = sctp_copy_one(sp, uio, resv_in_first);
	if (*errno) {
		/*
		 * NOTE(review): sp is written after sctp_free_a_strmoq()
		 * returns it to the zone — presumably safe under the
		 * current locking, but worth confirming.
		 */
		sctp_free_a_strmoq(stcb, sp);
		sp->data = NULL;
		sp->net = NULL;
		sp = NULL;
	} else {
		/* honor SCTP_ADDR_OVER, else use the primary destination */
		if (sp->sinfo_flags & SCTP_ADDR_OVER) {
			sp->net = net;
			sp->addr_over = 1;
		} else {
			sp->net = asoc->primary_destination;
			sp->addr_over = 0;
		}
		atomic_add_int(&sp->net->ref_count, 1);
		sctp_set_prsctp_policy(stcb, sp);
	}
out_now:
	return (sp);
}
10412
10413
10414int
10415sctp_sosend(struct socket *so,
10416 struct sockaddr *addr,
10417 struct uio *uio,
10418 struct mbuf *top,
10419 struct mbuf *control,
10420 int flags
10421 ,
10422 struct thread *p
10423)
10424{
10425 struct sctp_inpcb *inp;
10426 int error, use_rcvinfo = 0;
10427 struct sctp_sndrcvinfo srcv;
10428
10429 inp = (struct sctp_inpcb *)so->so_pcb;
10430 if (control) {
10431 /* process cmsg snd/rcv info (maybe a assoc-id) */
10432 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
10433 sizeof(srcv))) {
10434 /* got one */
10435 use_rcvinfo = 1;
10436 }
10437 }
10438 error = sctp_lower_sosend(so, addr, uio, top, control, flags,
10439 use_rcvinfo, &srcv, p);
10440 return (error);
10441}
10442
10443
9217extern unsigned int sctp_add_more_threshold;
9218int
9219sctp_lower_sosend(struct socket *so,
9220 struct sockaddr *addr,
9221 struct uio *uio,
9222 struct mbuf *i_pak,
9223 struct mbuf *control,
9224 int flags,
9225 int use_rcvinfo,
9226 struct sctp_sndrcvinfo *srcv,
9227 struct thread *p
9228)
9229{
9230 unsigned int sndlen, max_len;
9231 int error, len;
9232 struct mbuf *top = NULL;
9233
9234#if defined(__NetBSD__) || defined(__OpenBSD_)
9235 int s;
9236
9237#endif
9238 int queue_only = 0, queue_only_for_init = 0;
9239 int free_cnt_applied = 0;
9240 int un_sent = 0;
9241 int now_filled = 0;
9242 struct sctp_block_entry be;
9243 struct sctp_inpcb *inp;
9244 struct sctp_tcb *stcb = NULL;
9245 struct timeval now;
9246 struct sctp_nets *net;
9247 struct sctp_association *asoc;
9248 struct sctp_inpcb *t_inp;
9249 int create_lock_applied = 0;
9250 int nagle_applies = 0;
9251 int some_on_control = 0;
9252 int got_all_of_the_send = 0;
9253 int hold_tcblock = 0;
9254 int non_blocking = 0;
9255
9256 error = 0;
9257 net = NULL;
9258 stcb = NULL;
9259 asoc = NULL;
9260 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
9261 if (inp == NULL) {
9262 error = EFAULT;
9263 goto out_unlocked;
9264 }
9265 atomic_add_int(&inp->total_sends, 1);
9266 if (uio)
9267 sndlen = uio->uio_resid;
9268 else {
9269 sndlen = SCTP_HEADER_LEN(i_pak);
9270 top = SCTP_HEADER_TO_CHAIN(i_pak);
9271 }
9272
9273 hold_tcblock = 0;
9274
9275 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
9276 (inp->sctp_socket->so_qlimit)) {
9277 /* The listener can NOT send */
9278 error = EFAULT;
9279 goto out_unlocked;
9280 }
9281 if ((use_rcvinfo) && srcv) {
9282 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) || PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) {
9283 error = EINVAL;
9284 goto out_unlocked;
9285 }
9286 if (srcv->sinfo_flags)
9287 SCTP_STAT_INCR(sctps_sends_with_flags);
9288
9289 if (srcv->sinfo_flags & SCTP_SENDALL) {
9290 /* its a sendall */
9291 error = sctp_sendall(inp, uio, top, srcv);
9292 top = NULL;
9293 goto out_unlocked;
9294 }
9295 }
9296 /* now we must find the assoc */
9297 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
9298 SCTP_INP_RLOCK(inp);
9299 stcb = LIST_FIRST(&inp->sctp_asoc_list);
9300 if (stcb == NULL) {
9301 SCTP_INP_RUNLOCK(inp);
9302 error = ENOTCONN;
9303 goto out_unlocked;
9304 }
9305 hold_tcblock = 0;
9306 SCTP_INP_RUNLOCK(inp);
9307 if (addr)
9308 /* Must locate the net structure if addr given */
9309 net = sctp_findnet(stcb, addr);
9310 else
9311 net = stcb->asoc.primary_destination;
9312
9313 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
9314 stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
9315 if (stcb) {
9316 if (addr)
9317 /*
9318 * Must locate the net structure if addr
9319 * given
9320 */
9321 net = sctp_findnet(stcb, addr);
9322 else
9323 net = stcb->asoc.primary_destination;
9324 }
9325 hold_tcblock = 0;
9326 } else if (addr) {
9327 /*
9328 * Since we did not use findep we must increment it, and if
9329 * we don't find a tcb decrement it.
9330 */
9331 SCTP_INP_WLOCK(inp);
9332 SCTP_INP_INCR_REF(inp);
9333 SCTP_INP_WUNLOCK(inp);
9334 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
9335 if (stcb == NULL) {
9336 SCTP_INP_WLOCK(inp);
9337 SCTP_INP_DECR_REF(inp);
9338 SCTP_INP_WUNLOCK(inp);
9339 } else {
9340 hold_tcblock = 1;
9341 }
9342 }
9343 if ((stcb == NULL) && (addr)) {
9344 /* Possible implicit send? */
9345 SCTP_ASOC_CREATE_LOCK(inp);
9346 create_lock_applied = 1;
9347 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
9348 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
9349 /* Should I really unlock ? */
9350 error = EFAULT;
9351 goto out_unlocked;
9352
9353 }
9354 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
9355 (addr->sa_family == AF_INET6)) {
9356 error = EINVAL;
9357 goto out_unlocked;
9358 }
9359 SCTP_INP_WLOCK(inp);
9360 SCTP_INP_INCR_REF(inp);
9361 SCTP_INP_WUNLOCK(inp);
9362 /* With the lock applied look again */
9363 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
9364 if (stcb == NULL) {
9365 SCTP_INP_WLOCK(inp);
9366 SCTP_INP_DECR_REF(inp);
9367 SCTP_INP_WUNLOCK(inp);
9368 } else {
9369 hold_tcblock = 1;
9370 }
9371 }
9372 if (stcb == NULL) {
9373 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
9374 error = ENOTCONN;
9375 goto out_unlocked;
9376 } else if (addr == NULL) {
9377 error = ENOENT;
9378 goto out_unlocked;
9379 } else {
9380 /*
9381 * UDP style, we must go ahead and start the INIT
9382 * process
9383 */
10444int
10445sctp_lower_sosend(struct socket *so,
10446 struct sockaddr *addr,
10447 struct uio *uio,
10448 struct mbuf *i_pak,
10449 struct mbuf *control,
10450 int flags,
10451 int use_rcvinfo,
10452 struct sctp_sndrcvinfo *srcv,
10453 struct thread *p
10454)
10455{
10456 unsigned int sndlen, max_len;
10457 int error, len;
10458 struct mbuf *top = NULL;
10459
10460#if defined(__NetBSD__) || defined(__OpenBSD_)
10461 int s;
10462
10463#endif
10464 int queue_only = 0, queue_only_for_init = 0;
10465 int free_cnt_applied = 0;
10466 int un_sent = 0;
10467 int now_filled = 0;
10468 struct sctp_block_entry be;
10469 struct sctp_inpcb *inp;
10470 struct sctp_tcb *stcb = NULL;
10471 struct timeval now;
10472 struct sctp_nets *net;
10473 struct sctp_association *asoc;
10474 struct sctp_inpcb *t_inp;
10475 int create_lock_applied = 0;
10476 int nagle_applies = 0;
10477 int some_on_control = 0;
10478 int got_all_of_the_send = 0;
10479 int hold_tcblock = 0;
10480 int non_blocking = 0;
10481
10482 error = 0;
10483 net = NULL;
10484 stcb = NULL;
10485 asoc = NULL;
10486 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
10487 if (inp == NULL) {
10488 error = EFAULT;
10489 goto out_unlocked;
10490 }
10491 atomic_add_int(&inp->total_sends, 1);
10492 if (uio)
10493 sndlen = uio->uio_resid;
10494 else {
10495 sndlen = SCTP_HEADER_LEN(i_pak);
10496 top = SCTP_HEADER_TO_CHAIN(i_pak);
10497 }
10498
10499 hold_tcblock = 0;
10500
10501 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
10502 (inp->sctp_socket->so_qlimit)) {
10503 /* The listener can NOT send */
10504 error = EFAULT;
10505 goto out_unlocked;
10506 }
10507 if ((use_rcvinfo) && srcv) {
10508 if (INVALID_SINFO_FLAG(srcv->sinfo_flags) || PR_SCTP_INVALID_POLICY(srcv->sinfo_flags)) {
10509 error = EINVAL;
10510 goto out_unlocked;
10511 }
10512 if (srcv->sinfo_flags)
10513 SCTP_STAT_INCR(sctps_sends_with_flags);
10514
10515 if (srcv->sinfo_flags & SCTP_SENDALL) {
10516 /* its a sendall */
10517 error = sctp_sendall(inp, uio, top, srcv);
10518 top = NULL;
10519 goto out_unlocked;
10520 }
10521 }
10522 /* now we must find the assoc */
10523 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
10524 SCTP_INP_RLOCK(inp);
10525 stcb = LIST_FIRST(&inp->sctp_asoc_list);
10526 if (stcb == NULL) {
10527 SCTP_INP_RUNLOCK(inp);
10528 error = ENOTCONN;
10529 goto out_unlocked;
10530 }
10531 hold_tcblock = 0;
10532 SCTP_INP_RUNLOCK(inp);
10533 if (addr)
10534 /* Must locate the net structure if addr given */
10535 net = sctp_findnet(stcb, addr);
10536 else
10537 net = stcb->asoc.primary_destination;
10538
10539 } else if (use_rcvinfo && srcv && srcv->sinfo_assoc_id) {
10540 stcb = sctp_findassociation_ep_asocid(inp, srcv->sinfo_assoc_id, 0);
10541 if (stcb) {
10542 if (addr)
10543 /*
10544 * Must locate the net structure if addr
10545 * given
10546 */
10547 net = sctp_findnet(stcb, addr);
10548 else
10549 net = stcb->asoc.primary_destination;
10550 }
10551 hold_tcblock = 0;
10552 } else if (addr) {
10553 /*
10554 * Since we did not use findep we must increment it, and if
10555 * we don't find a tcb decrement it.
10556 */
10557 SCTP_INP_WLOCK(inp);
10558 SCTP_INP_INCR_REF(inp);
10559 SCTP_INP_WUNLOCK(inp);
10560 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
10561 if (stcb == NULL) {
10562 SCTP_INP_WLOCK(inp);
10563 SCTP_INP_DECR_REF(inp);
10564 SCTP_INP_WUNLOCK(inp);
10565 } else {
10566 hold_tcblock = 1;
10567 }
10568 }
10569 if ((stcb == NULL) && (addr)) {
10570 /* Possible implicit send? */
10571 SCTP_ASOC_CREATE_LOCK(inp);
10572 create_lock_applied = 1;
10573 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
10574 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
10575 /* Should I really unlock ? */
10576 error = EFAULT;
10577 goto out_unlocked;
10578
10579 }
10580 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
10581 (addr->sa_family == AF_INET6)) {
10582 error = EINVAL;
10583 goto out_unlocked;
10584 }
10585 SCTP_INP_WLOCK(inp);
10586 SCTP_INP_INCR_REF(inp);
10587 SCTP_INP_WUNLOCK(inp);
10588 /* With the lock applied look again */
10589 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
10590 if (stcb == NULL) {
10591 SCTP_INP_WLOCK(inp);
10592 SCTP_INP_DECR_REF(inp);
10593 SCTP_INP_WUNLOCK(inp);
10594 } else {
10595 hold_tcblock = 1;
10596 }
10597 }
10598 if (stcb == NULL) {
10599 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
10600 error = ENOTCONN;
10601 goto out_unlocked;
10602 } else if (addr == NULL) {
10603 error = ENOENT;
10604 goto out_unlocked;
10605 } else {
10606 /*
10607 * UDP style, we must go ahead and start the INIT
10608 * process
10609 */
10610 uint32_t vrf;
10611
9384 if ((use_rcvinfo) && (srcv) &&
9385 ((srcv->sinfo_flags & SCTP_ABORT) ||
9386 ((srcv->sinfo_flags & SCTP_EOF) &&
9387 (uio->uio_resid == 0)))) {
9388 /*
9389 * User asks to abort a non-existant assoc,
9390 * or EOF a non-existant assoc with no data
9391 */
9392 error = ENOENT;
9393 goto out_unlocked;
9394 }
9395 /* get an asoc/stcb struct */
10612 if ((use_rcvinfo) && (srcv) &&
10613 ((srcv->sinfo_flags & SCTP_ABORT) ||
10614 ((srcv->sinfo_flags & SCTP_EOF) &&
10615 (uio->uio_resid == 0)))) {
10616 /*
10617 * User asks to abort a non-existant assoc,
10618 * or EOF a non-existant assoc with no data
10619 */
10620 error = ENOENT;
10621 goto out_unlocked;
10622 }
10623 /* get an asoc/stcb struct */
9396 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
10624 vrf = SCTP_DEFAULT_VRFID;
10625 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf);
9397 if (stcb == NULL) {
9398 /* Error is setup for us in the call */
9399 goto out_unlocked;
9400 }
9401 if (create_lock_applied) {
9402 SCTP_ASOC_CREATE_UNLOCK(inp);
9403 create_lock_applied = 0;
9404 } else {
9405 printf("Huh-3? create lock should have been on??\n");
9406 }
9407 /*
9408 * Turn on queue only flag to prevent data from
9409 * being sent
9410 */
9411 queue_only = 1;
9412 asoc = &stcb->asoc;
9413 asoc->state = SCTP_STATE_COOKIE_WAIT;
9414 SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
9415
9416 /* initialize authentication params for the assoc */
9417 sctp_initialize_auth_params(inp, stcb);
9418
9419 if (control) {
9420 /*
9421 * see if a init structure exists in cmsg
9422 * headers
9423 */
9424 struct sctp_initmsg initm;
9425 int i;
9426
9427 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
9428 sizeof(initm))) {
9429 /*
9430 * we have an INIT override of the
9431 * default
9432 */
9433 if (initm.sinit_max_attempts)
9434 asoc->max_init_times = initm.sinit_max_attempts;
9435 if (initm.sinit_num_ostreams)
9436 asoc->pre_open_streams = initm.sinit_num_ostreams;
9437 if (initm.sinit_max_instreams)
9438 asoc->max_inbound_streams = initm.sinit_max_instreams;
9439 if (initm.sinit_max_init_timeo)
9440 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
9441 if (asoc->streamoutcnt < asoc->pre_open_streams) {
9442 /* Default is NOT correct */
9443#ifdef SCTP_DEBUG
9444 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
9445 printf("Ok, defout:%d pre_open:%d\n",
9446 asoc->streamoutcnt, asoc->pre_open_streams);
9447 }
9448#endif
9449 SCTP_FREE(asoc->strmout);
9450 asoc->strmout = NULL;
9451 asoc->streamoutcnt = asoc->pre_open_streams;
9452 /*
9453 * What happens if this
9454 * fails? .. we panic ...
9455 */
9456 {
9457 struct sctp_stream_out *tmp_str;
9458 int had_lock = 0;
9459
9460 if (hold_tcblock) {
9461 had_lock = 1;
9462 SCTP_TCB_UNLOCK(stcb);
9463 }
9464 SCTP_MALLOC(tmp_str,
9465 struct sctp_stream_out *,
9466 asoc->streamoutcnt *
9467 sizeof(struct sctp_stream_out),
9468 "StreamsOut");
9469 if (had_lock) {
9470 SCTP_TCB_LOCK(stcb);
9471 }
9472 if (asoc->strmout == NULL) {
9473 asoc->strmout = tmp_str;
9474 } else {
9475 SCTP_FREE(asoc->strmout);
9476 asoc->strmout = tmp_str;
9477 }
9478 }
9479 for (i = 0; i < asoc->streamoutcnt; i++) {
9480 /*
9481 * inbound side must
9482 * be set to 0xffff,
9483 * also NOTE when we
9484 * get the INIT-ACK
9485 * back (for INIT
9486 * sender) we MUST
9487 * reduce the count
9488 * (streamoutcnt)
9489 * but first check
9490 * if we sent to any
9491 * of the upper
9492 * streams that were
9493 * dropped (if some
9494 * were). Those that
9495 * were dropped must
9496 * be notified to
9497 * the upper layer
9498 * as failed to
9499 * send.
9500 */
9501 asoc->strmout[i].next_sequence_sent = 0x0;
9502 TAILQ_INIT(&asoc->strmout[i].outqueue);
9503 asoc->strmout[i].stream_no = i;
9504 asoc->strmout[i].last_msg_incomplete = 0;
9505 asoc->strmout[i].next_spoke.tqe_next = 0;
9506 asoc->strmout[i].next_spoke.tqe_prev = 0;
9507 }
9508 }
9509 }
9510 }
9511 hold_tcblock = 1;
9512 /* out with the INIT */
9513 queue_only_for_init = 1;
9514 /*
9515 * we may want to dig in after this call and adjust
9516 * the MTU value. It defaulted to 1500 (constant)
9517 * but the ro structure may now have an update and
9518 * thus we may need to change it BEFORE we append
9519 * the message.
9520 */
9521 net = stcb->asoc.primary_destination;
9522 asoc = &stcb->asoc;
9523 }
9524 }
10626 if (stcb == NULL) {
10627 /* Error is setup for us in the call */
10628 goto out_unlocked;
10629 }
10630 if (create_lock_applied) {
10631 SCTP_ASOC_CREATE_UNLOCK(inp);
10632 create_lock_applied = 0;
10633 } else {
10634 printf("Huh-3? create lock should have been on??\n");
10635 }
10636 /*
10637 * Turn on queue only flag to prevent data from
10638 * being sent
10639 */
10640 queue_only = 1;
10641 asoc = &stcb->asoc;
10642 asoc->state = SCTP_STATE_COOKIE_WAIT;
10643 SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
10644
10645 /* initialize authentication params for the assoc */
10646 sctp_initialize_auth_params(inp, stcb);
10647
10648 if (control) {
10649 /*
10650 * see if a init structure exists in cmsg
10651 * headers
10652 */
10653 struct sctp_initmsg initm;
10654 int i;
10655
10656 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
10657 sizeof(initm))) {
10658 /*
10659 * we have an INIT override of the
10660 * default
10661 */
10662 if (initm.sinit_max_attempts)
10663 asoc->max_init_times = initm.sinit_max_attempts;
10664 if (initm.sinit_num_ostreams)
10665 asoc->pre_open_streams = initm.sinit_num_ostreams;
10666 if (initm.sinit_max_instreams)
10667 asoc->max_inbound_streams = initm.sinit_max_instreams;
10668 if (initm.sinit_max_init_timeo)
10669 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
10670 if (asoc->streamoutcnt < asoc->pre_open_streams) {
10671 /* Default is NOT correct */
10672#ifdef SCTP_DEBUG
10673 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10674 printf("Ok, defout:%d pre_open:%d\n",
10675 asoc->streamoutcnt, asoc->pre_open_streams);
10676 }
10677#endif
10678 SCTP_FREE(asoc->strmout);
10679 asoc->strmout = NULL;
10680 asoc->streamoutcnt = asoc->pre_open_streams;
10681 /*
10682 * What happens if this
10683 * fails? .. we panic ...
10684 */
10685 {
10686 struct sctp_stream_out *tmp_str;
10687 int had_lock = 0;
10688
10689 if (hold_tcblock) {
10690 had_lock = 1;
10691 SCTP_TCB_UNLOCK(stcb);
10692 }
10693 SCTP_MALLOC(tmp_str,
10694 struct sctp_stream_out *,
10695 asoc->streamoutcnt *
10696 sizeof(struct sctp_stream_out),
10697 "StreamsOut");
10698 if (had_lock) {
10699 SCTP_TCB_LOCK(stcb);
10700 }
10701 if (asoc->strmout == NULL) {
10702 asoc->strmout = tmp_str;
10703 } else {
10704 SCTP_FREE(asoc->strmout);
10705 asoc->strmout = tmp_str;
10706 }
10707 }
10708 for (i = 0; i < asoc->streamoutcnt; i++) {
10709 /*
10710 * inbound side must
10711 * be set to 0xffff,
10712 * also NOTE when we
10713 * get the INIT-ACK
10714 * back (for INIT
10715 * sender) we MUST
10716 * reduce the count
10717 * (streamoutcnt)
10718 * but first check
10719 * if we sent to any
10720 * of the upper
10721 * streams that were
10722 * dropped (if some
10723 * were). Those that
10724 * were dropped must
10725 * be notified to
10726 * the upper layer
10727 * as failed to
10728 * send.
10729 */
10730 asoc->strmout[i].next_sequence_sent = 0x0;
10731 TAILQ_INIT(&asoc->strmout[i].outqueue);
10732 asoc->strmout[i].stream_no = i;
10733 asoc->strmout[i].last_msg_incomplete = 0;
10734 asoc->strmout[i].next_spoke.tqe_next = 0;
10735 asoc->strmout[i].next_spoke.tqe_prev = 0;
10736 }
10737 }
10738 }
10739 }
10740 hold_tcblock = 1;
10741 /* out with the INIT */
10742 queue_only_for_init = 1;
10743 /*
10744 * we may want to dig in after this call and adjust
10745 * the MTU value. It defaulted to 1500 (constant)
10746 * but the ro structure may now have an update and
10747 * thus we may need to change it BEFORE we append
10748 * the message.
10749 */
10750 net = stcb->asoc.primary_destination;
10751 asoc = &stcb->asoc;
10752 }
10753 }
9525 if (((so->so_state & SS_NBIO)
10754 if ((SCTP_SO_IS_NBIO(so)
9526 || (flags & MSG_NBIO)
9527 )) {
9528 non_blocking = 1;
9529 }
9530 asoc = &stcb->asoc;
9531 /* would we block? */
9532 if (non_blocking) {
9533 if ((so->so_snd.sb_hiwat <
9534 (sndlen + stcb->asoc.total_output_queue_size)) ||
9535 (stcb->asoc.chunks_on_out_queue >
9536 sctp_max_chunks_on_queue)) {
9537 error = EWOULDBLOCK;
9538 atomic_add_int(&stcb->sctp_ep->total_nospaces, 1);
9539 goto out_unlocked;
9540 }
9541 }
9542 /* Keep the stcb from being freed under our feet */
9543 atomic_add_int(&stcb->asoc.refcnt, 1);
9544 free_cnt_applied = 1;
9545
9546 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
9547 error = ECONNRESET;
9548 goto out_unlocked;
9549 }
9550 if (create_lock_applied) {
9551 SCTP_ASOC_CREATE_UNLOCK(inp);
9552 create_lock_applied = 0;
9553 }
9554 if (asoc->stream_reset_outstanding) {
9555 /*
9556 * Can't queue any data while stream reset is underway.
9557 */
9558 error = EAGAIN;
9559 goto out_unlocked;
9560 }
9561 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
9562 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
9563 queue_only = 1;
9564 }
9565 if ((use_rcvinfo == 0) || (srcv == NULL)) {
9566 /* Grab the default stuff from the asoc */
9567 srcv = &stcb->asoc.def_send;
9568 }
9569 /* we are now done with all control */
9570 if (control) {
9571 sctp_m_freem(control);
9572 control = NULL;
9573 }
9574 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
9575 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
9576 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
9577 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9578 if ((use_rcvinfo) &&
9579 (srcv->sinfo_flags & SCTP_ABORT)) {
9580 ;
9581 } else {
9582 error = ECONNRESET;
9583 goto out_unlocked;
9584 }
9585 }
9586 /* Ok, we will attempt a msgsnd :> */
9587 if (p) {
9588 p->td_proc->p_stats->p_ru.ru_msgsnd++;
9589 }
9590 if (stcb) {
9591 if (net && ((srcv->sinfo_flags & SCTP_ADDR_OVER))) {
9592 /* we take the override or the unconfirmed */
9593 ;
9594 } else {
9595 net = stcb->asoc.primary_destination;
9596 }
9597 }
9598 if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) {
9599 /*
9600 * CMT: Added check for CMT above. net above is the primary
9601 * dest. If CMT is ON, sender should always attempt to send
9602 * with the output routine sctp_fill_outqueue() that loops
9603 * through all destination addresses. Therefore, if CMT is
9604 * ON, queue_only is NOT set to 1 here, so that
9605 * sctp_chunk_output() can be called below.
9606 */
9607 queue_only = 1;
9608
9609 } else if (asoc->ifp_had_enobuf) {
9610 SCTP_STAT_INCR(sctps_ifnomemqueued);
9611 if (net->flight_size > (net->mtu * 2))
9612 queue_only = 1;
9613 asoc->ifp_had_enobuf = 0;
9614 } else {
9615 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9616 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
9617 }
9618 /* Are we aborting? */
9619 if (srcv->sinfo_flags & SCTP_ABORT) {
9620 struct mbuf *mm;
9621 int tot_demand, tot_out, max;
9622
9623 SCTP_STAT_INCR(sctps_sends_with_abort);
9624 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
9625 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
9626 /* It has to be up before we abort */
9627 /* how big is the user initiated abort? */
9628 error = EINVAL;
9629 goto out;
9630 }
9631 if (hold_tcblock) {
9632 SCTP_TCB_UNLOCK(stcb);
9633 hold_tcblock = 0;
9634 }
9635 if (top) {
9636 struct mbuf *cntm;
9637
9638 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
9639
9640 tot_out = 0;
9641 cntm = top;
9642 while (cntm) {
9643 tot_out += SCTP_BUF_LEN(cntm);
9644 cntm = SCTP_BUF_NEXT(cntm);
9645 }
9646 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
9647 } else {
9648 /* Must fit in a MTU */
9649 tot_out = uio->uio_resid;
9650 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
9651 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
9652 }
9653 if (mm == NULL) {
9654 error = ENOMEM;
9655 goto out;
9656 }
9657 max = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
9658 max -= sizeof(struct sctp_abort_msg);
9659 if (tot_out > max) {
9660 tot_out = max;
9661 }
9662 if (mm) {
9663 struct sctp_paramhdr *ph;
9664
9665 /* now move forward the data pointer */
9666 ph = mtod(mm, struct sctp_paramhdr *);
9667 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
9668 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
9669 ph++;
9670 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
9671 if (top == NULL) {
9672 error = uiomove((caddr_t)ph, (int)tot_out, uio);
9673 if (error) {
9674 /*
9675 * Here if we can't get his data we
9676 * still abort we just don't get to
9677 * send the users note :-0
9678 */
9679 sctp_m_freem(mm);
9680 mm = NULL;
9681 }
9682 } else {
9683 SCTP_BUF_NEXT(mm) = top;
9684 }
9685 }
9686 if (hold_tcblock == 0) {
9687 SCTP_TCB_LOCK(stcb);
9688 hold_tcblock = 1;
9689 }
9690 atomic_add_int(&stcb->asoc.refcnt, -1);
9691 free_cnt_applied = 0;
9692 /* release this lock, otherwise we hang on ourselves */
9693 sctp_abort_an_association(stcb->sctp_ep, stcb,
9694 SCTP_RESPONSE_TO_USER_REQ,
9695 mm);
9696 /* now relock the stcb so everything is sane */
9697 hold_tcblock = 0;
9698 stcb = NULL;
9699 goto out_unlocked;
9700 }
9701 /* Calculate the maximum we can send */
9702 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) {
9703 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
9704 } else {
9705 max_len = 0;
9706 }
9707 if (hold_tcblock) {
9708 SCTP_TCB_UNLOCK(stcb);
9709 hold_tcblock = 0;
9710 }
9711 /* Is the stream no. valid? */
9712 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
9713 /* Invalid stream number */
9714 error = EINVAL;
9715 goto out_unlocked;
9716 }
9717 if (asoc->strmout == NULL) {
9718 /* huh? software error */
9719 error = EFAULT;
9720 goto out_unlocked;
9721 }
9722 len = 0;
9723 if (max_len < sctp_add_more_threshold) {
9724 /* No room right no ! */
9725 SOCKBUF_LOCK(&so->so_snd);
9726 while (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
9727#ifdef SCTP_BLK_LOGGING
9728 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
9729 so, asoc, uio->uio_resid);
9730#endif
9731 be.error = 0;
9732 stcb->block_entry = &be;
9733 error = sbwait(&so->so_snd);
9734 stcb->block_entry = NULL;
9735 if (error || so->so_error || be.error) {
9736 if (error == 0) {
9737 if (so->so_error)
9738 error = so->so_error;
9739 if (be.error) {
9740 error = be.error;
9741 }
9742 }
9743 SOCKBUF_UNLOCK(&so->so_snd);
9744 goto out_unlocked;
9745 }
9746#ifdef SCTP_BLK_LOGGING
9747 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
9748 so, asoc, stcb->asoc.total_output_queue_size);
9749#endif
9750 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
9751 goto out_unlocked;
9752 }
9753 }
9754 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) {
9755 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
9756 } else {
9757 max_len = 0;
9758 }
9759 SOCKBUF_UNLOCK(&so->so_snd);
9760 }
9761 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
9762 goto out_unlocked;
9763 }
9764 atomic_add_int(&stcb->total_sends, 1);
9765 if (top == NULL) {
9766 struct sctp_stream_queue_pending *sp;
9767 struct sctp_stream_out *strm;
9768 uint32_t sndout, initial_out;
9769 int user_marks_eor;
9770
9771 if (uio->uio_resid == 0) {
9772 if (srcv->sinfo_flags & SCTP_EOF) {
9773 got_all_of_the_send = 1;
9774 goto dataless_eof;
9775 } else {
9776 error = EINVAL;
9777 goto out;
9778 }
9779 }
9780 initial_out = uio->uio_resid;
9781
9782 if ((asoc->stream_locked) &&
9783 (asoc->stream_locked_on != srcv->sinfo_stream)) {
9784 error = EAGAIN;
9785 goto out;
9786 }
9787 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
9788 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
9789 if (strm->last_msg_incomplete == 0) {
9790 do_a_copy_in:
9791 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
9792 if ((sp == NULL) || (error)) {
9793 goto out;
9794 }
9795 SCTP_TCB_SEND_LOCK(stcb);
9796 if (sp->msg_is_complete) {
9797 strm->last_msg_incomplete = 0;
9798 asoc->stream_locked = 0;
9799 } else {
9800 /*
9801 * Just got locked to this guy in case of an
9802 * interupt.
9803 */
9804 strm->last_msg_incomplete = 1;
9805 asoc->stream_locked = 1;
9806 asoc->stream_locked_on = srcv->sinfo_stream;
9807 }
9808 sctp_snd_sb_alloc(stcb, sp->length);
9809
9810 asoc->stream_queue_cnt++;
9811 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
9812 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
9813 sp->strseq = strm->next_sequence_sent;
9814#ifdef SCTP_LOG_SENDING_STR
9815 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
9816 (uintptr_t) stcb, (uintptr_t) sp,
9817 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
9818#endif
9819 strm->next_sequence_sent++;
9820 } else {
9821 SCTP_STAT_INCR(sctps_sends_with_unord);
9822 }
9823
9824 if ((strm->next_spoke.tqe_next == NULL) &&
9825 (strm->next_spoke.tqe_prev == NULL)) {
9826 /* Not on wheel, insert */
9827 sctp_insert_on_wheel(stcb, asoc, strm, 1);
9828 }
9829 SCTP_TCB_SEND_UNLOCK(stcb);
9830 } else {
9831 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
9832 if (sp == NULL) {
9833 /* ???? Huh ??? last msg is gone */
9834#ifdef INVARIANTS
9835 panic("Warning: Last msg marked incomplete, yet nothing left?");
9836#else
9837 printf("Warning: Last msg marked incomplete, yet nothing left?\n");
9838 strm->last_msg_incomplete = 0;
9839#endif
9840 goto do_a_copy_in;
9841
9842 }
9843 }
9844 while (uio->uio_resid > 0) {
9845 /* How much room do we have? */
9846 struct mbuf *new_tail, *mm;
9847
9848 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size)
9849 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
9850 else
9851 max_len = 0;
9852
9853 if ((max_len > sctp_add_more_threshold) ||
9854 (uio->uio_resid && (uio->uio_resid < max_len))) {
9855 sndout = 0;
9856 new_tail = NULL;
9857 if (hold_tcblock) {
9858 SCTP_TCB_UNLOCK(stcb);
9859 hold_tcblock = 0;
9860 }
9861 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
9862 if ((mm == NULL) || error) {
9863 if (mm) {
9864 sctp_m_freem(mm);
9865 }
9866 goto out;
9867 }
9868 /* Update the mbuf and count */
9869 SCTP_TCB_SEND_LOCK(stcb);
9870 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
9871 /*
9872 * we need to get out. Peer probably
9873 * aborted.
9874 */
9875 sctp_m_freem(mm);
9876 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED)
9877 error = ECONNRESET;
9878 goto out;
9879 }
9880 if (sp->tail_mbuf) {
9881 /* tack it to the end */
9882 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
9883 sp->tail_mbuf = new_tail;
9884 } else {
9885 /* A stolen mbuf */
9886 sp->data = mm;
9887 sp->tail_mbuf = new_tail;
9888 }
9889 sctp_snd_sb_alloc(stcb, sndout);
9890 sp->length += sndout;
9891 len += sndout;
9892 /* Did we reach EOR? */
9893 if ((uio->uio_resid == 0) &&
9894 ((user_marks_eor == 0) ||
9895 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
9896 ) {
9897 sp->msg_is_complete = 1;
9898 } else {
9899 sp->msg_is_complete = 0;
9900 }
9901 SCTP_TCB_SEND_UNLOCK(stcb);
9902 }
9903 if (uio->uio_resid == 0) {
9904 /* got it all? */
9905 continue;
9906 }
9907 /* PR-SCTP? */
9908 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
9909 /*
9910 * This is ugly but we must assure locking
9911 * order
9912 */
9913 if (hold_tcblock == 0) {
9914 SCTP_TCB_LOCK(stcb);
9915 hold_tcblock = 1;
9916 }
9917 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
9918 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size)
9919 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
9920 else
9921 max_len = 0;
9922 if (max_len > 0) {
9923 continue;
9924 }
9925 SCTP_TCB_UNLOCK(stcb);
9926 hold_tcblock = 0;
9927 }
9928 /* wait for space now */
9929 if (non_blocking) {
9930 /* Non-blocking io in place out */
9931 goto skip_out_eof;
9932 }
9933 if ((net->flight_size > net->cwnd) &&
9934 (sctp_cmt_on_off == 0)) {
9935 queue_only = 1;
9936
9937 } else if (asoc->ifp_had_enobuf) {
9938 SCTP_STAT_INCR(sctps_ifnomemqueued);
9939 if (net->flight_size > (net->mtu * 2)) {
9940 queue_only = 1;
9941 } else {
9942 queue_only = 0;
9943 }
9944 asoc->ifp_had_enobuf = 0;
9945 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9946 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
9947 sizeof(struct sctp_data_chunk)));
9948 } else {
9949 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
9950 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
9951 sizeof(struct sctp_data_chunk)));
9952 queue_only = 0;
9953 }
9954 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
9955 (stcb->asoc.total_flight > 0) &&
9956 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
9957 ) {
9958
9959 /*
9960 * Ok, Nagle is set on and we have data
9961 * outstanding. Don't send anything and let
9962 * SACKs drive out the data unless wen have
9963 * a "full" segment to send.
9964 */
9965#ifdef SCTP_NAGLE_LOGGING
9966 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
9967#endif
9968 SCTP_STAT_INCR(sctps_naglequeued);
9969 nagle_applies = 1;
9970 } else {
9971#ifdef SCTP_NAGLE_LOGGING
9972 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
9973 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
9974#endif
9975 SCTP_STAT_INCR(sctps_naglesent);
9976 nagle_applies = 0;
9977 }
9978 /* What about the INIT, send it maybe */
9979#ifdef SCTP_BLK_LOGGING
9980 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent);
9981 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, stcb->asoc.total_flight,
9982 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
9983#endif
9984 if (queue_only_for_init) {
9985 if (hold_tcblock == 0) {
9986 SCTP_TCB_LOCK(stcb);
9987 hold_tcblock = 1;
9988 }
9989 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
9990 /* a collision took us forward? */
9991 queue_only_for_init = 0;
9992 queue_only = 0;
9993 } else {
9994 sctp_send_initiate(inp, stcb);
9995 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
9996 queue_only_for_init = 0;
9997 queue_only = 1;
9998 }
9999 }
10000 if ((queue_only == 0) && (nagle_applies == 0)
10001 ) {
10002 /*
10003 * need to start chunk output before
10004 * blocking.. note that if a lock is already
10005 * applied, then the input via the net is
10006 * happening and I don't need to start
10007 * output :-D
10008 */
10009 if (hold_tcblock == 0) {
10010 if (SCTP_TCB_TRYLOCK(stcb)) {
10011 hold_tcblock = 1;
10012 sctp_chunk_output(inp,
10013 stcb,
10014 SCTP_OUTPUT_FROM_USR_SEND);
10015
10016 }
10017 } else {
10018 sctp_chunk_output(inp,
10019 stcb,
10020 SCTP_OUTPUT_FROM_USR_SEND);
10021 }
10022 if (hold_tcblock == 1) {
10023 SCTP_TCB_UNLOCK(stcb);
10024 hold_tcblock = 0;
10025 }
10026 }
10027 SOCKBUF_LOCK(&so->so_snd);
10028 /*
10029 * This is a bit strange, but I think it will work.
10030 * The total_output_queue_size is locked and
10031 * protected by the TCB_LOCK, which we just
10032 * released. There is a race that can occur between
10033 * releasing it above, and me getting the socket
10034 * lock, where sacks come in but we have not put the
10035 * SB_WAIT on the so_snd buffer to get the wakeup.
10036 * After the LOCK is applied the sack_processing
10037 * will also need to LOCK the so->so_snd to do the
10038 * actual sowwakeup(). So once we have the socket
10039 * buffer lock if we recheck the size we KNOW we
10040 * will get to sleep safely with the wakeup flag in
10041 * place.
10042 */
10043 if (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
10044#ifdef SCTP_BLK_LOGGING
10045 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
10046 so, asoc, uio->uio_resid);
10047#endif
10048 be.error = 0;
10049 stcb->block_entry = &be;
10050 error = sbwait(&so->so_snd);
10051 stcb->block_entry = NULL;
10052
10053 if (error || so->so_error || be.error) {
10054 if (error == 0) {
10055 if (so->so_error)
10056 error = so->so_error;
10057 if (be.error) {
10058 error = be.error;
10059 }
10060 }
10061 SOCKBUF_UNLOCK(&so->so_snd);
10062 goto out_unlocked;
10063 }
10064#ifdef SCTP_BLK_LOGGING
10065 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
10066 so, asoc, stcb->asoc.total_output_queue_size);
10067#endif
10068 }
10069 SOCKBUF_UNLOCK(&so->so_snd);
10070 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
10071 goto out_unlocked;
10072 }
10073 }
10074 SCTP_TCB_SEND_LOCK(stcb);
10075 if (sp->msg_is_complete == 0) {
10076 strm->last_msg_incomplete = 1;
10077 asoc->stream_locked = 1;
10078 asoc->stream_locked_on = srcv->sinfo_stream;
10079 } else {
10080 strm->last_msg_incomplete = 0;
10081 asoc->stream_locked = 0;
10082 }
10083 SCTP_TCB_SEND_UNLOCK(stcb);
10084 if (uio->uio_resid == 0) {
10085 got_all_of_the_send = 1;
10086 }
10087 } else if (top) {
10088 /* We send in a 0, since we do NOT have any locks */
10089 error = sctp_msg_append(stcb, net, top, srcv, 0);
10090 top = NULL;
10091 }
10092 if (error) {
10093 goto out;
10094 }
10095dataless_eof:
10096 /* EOF thing ? */
10097 if ((srcv->sinfo_flags & SCTP_EOF) &&
10098 (got_all_of_the_send == 1) &&
10099 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)
10100 ) {
10101 SCTP_STAT_INCR(sctps_sends_with_eof);
10102 error = 0;
10103 if (hold_tcblock == 0) {
10104 SCTP_TCB_LOCK(stcb);
10105 hold_tcblock = 1;
10106 }
10107 if (TAILQ_EMPTY(&asoc->send_queue) &&
10108 TAILQ_EMPTY(&asoc->sent_queue) &&
10109 (asoc->stream_queue_cnt == 0)) {
10110 if (asoc->locked_on_sending) {
10111 goto abort_anyway;
10112 }
10113 /* there is nothing queued to send, so I'm done... */
10114 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
10115 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
10116 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
10117 /* only send SHUTDOWN the first time through */
10118 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
10119 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
10120 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
10121 }
10122 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
10123 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
10124 asoc->primary_destination);
10125 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
10126 asoc->primary_destination);
10127 }
10128 } else {
10129 /*
10130 * we still got (or just got) data to send, so set
10131 * SHUTDOWN_PENDING
10132 */
10133 /*
10134 * XXX sockets draft says that SCTP_EOF should be
10135 * sent with no data. currently, we will allow user
10136 * data to be sent first and move to
10137 * SHUTDOWN-PENDING
10138 */
10139 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
10140 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
10141 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
10142 if (hold_tcblock == 0) {
10143 SCTP_TCB_LOCK(stcb);
10144 hold_tcblock = 1;
10145 }
10146 if (asoc->locked_on_sending) {
10147 /* Locked to send out the data */
10148 struct sctp_stream_queue_pending *sp;
10149
10150 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
10151 if (sp) {
10152 if ((sp->length == 0) && (sp->msg_is_complete == 0))
10153 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
10154 }
10155 }
10156 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
10157 if (TAILQ_EMPTY(&asoc->send_queue) &&
10158 TAILQ_EMPTY(&asoc->sent_queue) &&
10159 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
10160 abort_anyway:
10161 if (free_cnt_applied) {
10162 atomic_add_int(&stcb->asoc.refcnt, -1);
10163 free_cnt_applied = 0;
10164 }
10165 sctp_abort_an_association(stcb->sctp_ep, stcb,
10166 SCTP_RESPONSE_TO_USER_REQ,
10167 NULL);
10168 /*
10169 * now relock the stcb so everything
10170 * is sane
10171 */
10172 hold_tcblock = 0;
10173 stcb = NULL;
10174 goto out;
10175 }
10176 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
10177 asoc->primary_destination);
10178 }
10179 }
10180 }
10181skip_out_eof:
10182 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
10183 some_on_control = 1;
10184 }
10185 if ((net->flight_size > net->cwnd) &&
10186 (sctp_cmt_on_off == 0)) {
10187 queue_only = 1;
10188 } else if (asoc->ifp_had_enobuf) {
10189 SCTP_STAT_INCR(sctps_ifnomemqueued);
10190 if (net->flight_size > (net->mtu * 2)) {
10191 queue_only = 1;
10192 } else {
10193 queue_only = 0;
10194 }
10195 asoc->ifp_had_enobuf = 0;
10196 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10197 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
10198 sizeof(struct sctp_data_chunk)));
10199 } else {
10200 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10201 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
10202 sizeof(struct sctp_data_chunk)));
10203 queue_only = 0;
10204 }
10205 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
10206 (stcb->asoc.total_flight > 0) &&
10207 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
10208 ) {
10209
10210 /*
10211 * Ok, Nagle is set on and we have data outstanding. Don't
 10212 * send anything and let SACKs drive out the data unless we
10213 * have a "full" segment to send.
10214 */
10215#ifdef SCTP_NAGLE_LOGGING
10216 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
10217#endif
10218 SCTP_STAT_INCR(sctps_naglequeued);
10219 nagle_applies = 1;
10220 } else {
10221#ifdef SCTP_NAGLE_LOGGING
10222 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
10223 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
10224#endif
10225 SCTP_STAT_INCR(sctps_naglesent);
10226 nagle_applies = 0;
10227 }
10228 if (queue_only_for_init) {
10229 if (hold_tcblock == 0) {
10230 SCTP_TCB_LOCK(stcb);
10231 hold_tcblock = 1;
10232 }
10233 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
10234 /* a collision took us forward? */
10235 queue_only_for_init = 0;
10236 queue_only = 0;
10237 } else {
10238 sctp_send_initiate(inp, stcb);
10239 if (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)
10240 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT |
10241 SCTP_STATE_SHUTDOWN_PENDING;
10242 else
10243 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
10244 queue_only_for_init = 0;
10245 queue_only = 1;
10246 }
10247 }
10248 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
10249 /* we can attempt to send too. */
10250 if (hold_tcblock == 0) {
10251 /*
10252 * If there is activity recv'ing sacks no need to
10253 * send
10254 */
10255 if (SCTP_TCB_TRYLOCK(stcb)) {
10256 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
10257 hold_tcblock = 1;
10258 }
10259 } else {
10260 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
10261 }
10262 } else if ((queue_only == 0) &&
10263 (stcb->asoc.peers_rwnd == 0) &&
10264 (stcb->asoc.total_flight == 0)) {
10265 /* We get to have a probe outstanding */
10266 if (hold_tcblock == 0) {
10267 hold_tcblock = 1;
10268 SCTP_TCB_LOCK(stcb);
10269 }
10270 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
10271 } else if (some_on_control) {
10272 int num_out, reason, cwnd_full, frag_point;
10273
10274 /* Here we do control only */
10275 if (hold_tcblock == 0) {
10276 hold_tcblock = 1;
10277 SCTP_TCB_LOCK(stcb);
10278 }
10279 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
10280 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
10281 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
10282 }
10283#ifdef SCTP_DEBUG
10284 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10285 printf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
10286 queue_only, stcb->asoc.peers_rwnd, un_sent,
10287 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
10288 stcb->asoc.total_output_queue_size);
10289 }
10290#endif
10291out:
10292out_unlocked:
10293
10294 if (create_lock_applied) {
10295 SCTP_ASOC_CREATE_UNLOCK(inp);
10296 create_lock_applied = 0;
10297 }
10298 if ((stcb) && hold_tcblock) {
10299 SCTP_TCB_UNLOCK(stcb);
10300 }
10301 if (stcb && free_cnt_applied) {
10302 atomic_add_int(&stcb->asoc.refcnt, -1);
10303 }
10304#ifdef INVARIANTS
10305 if (stcb) {
10306 if (mtx_owned(&stcb->tcb_mtx)) {
10307 panic("Leaving with tcb mtx owned?");
10308 }
10309 if (mtx_owned(&stcb->tcb_send_mtx)) {
10310 panic("Leaving with tcb send mtx owned?");
10311 }
10312 }
10313#endif
10314 if (top) {
10315 sctp_m_freem(top);
10316 }
10317 if (control) {
10318 sctp_m_freem(control);
10319 }
10320 return (error);
10321}
10322
10323
10324/*
10325 * generate an AUTHentication chunk, if required
10326 */
10327struct mbuf *
10328sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
10329 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
10330 struct sctp_tcb *stcb, uint8_t chunk)
10331{
10332 struct mbuf *m_auth;
10333 struct sctp_auth_chunk *auth;
10334 int chunk_len;
10335
10336 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
10337 (stcb == NULL))
10338 return (m);
10339
10340 /* sysctl disabled auth? */
10341 if (sctp_auth_disable)
10342 return (m);
10343
10344 /* peer doesn't do auth... */
10345 if (!stcb->asoc.peer_supports_auth) {
10346 return (m);
10347 }
10348 /* does the requested chunk require auth? */
10349 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
10350 return (m);
10351 }
10352 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
10353 if (m_auth == NULL) {
10354 /* no mbuf's */
10355 return (m);
10356 }
10357 /* reserve some space if this will be the first mbuf */
10358 if (m == NULL)
10359 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
10360 /* fill in the AUTH chunk details */
10361 auth = mtod(m_auth, struct sctp_auth_chunk *);
10362 bzero(auth, sizeof(*auth));
10363 auth->ch.chunk_type = SCTP_AUTHENTICATION;
10364 auth->ch.chunk_flags = 0;
10365 chunk_len = sizeof(*auth) +
10366 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
10367 auth->ch.chunk_length = htons(chunk_len);
10368 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
10369 /* key id and hmac digest will be computed and filled in upon send */
10370
10371 /* save the offset where the auth was inserted into the chain */
10372 if (m != NULL) {
10373 struct mbuf *cn;
10374
10375 *offset = 0;
10376 cn = m;
10377 while (cn) {
10378 *offset += SCTP_BUF_LEN(cn);
10379 cn = SCTP_BUF_NEXT(cn);
10380 }
10381 } else
10382 *offset = 0;
10383
10384 /* update length and return pointer to the auth chunk */
10385 SCTP_BUF_LEN(m_auth) = chunk_len;
10386 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
10387 if (auth_ret != NULL)
10388 *auth_ret = auth;
10389
10390 return (m);
10391}
10755 || (flags & MSG_NBIO)
10756 )) {
10757 non_blocking = 1;
10758 }
10759 asoc = &stcb->asoc;
10760 /* would we block? */
10761 if (non_blocking) {
10762 if ((so->so_snd.sb_hiwat <
10763 (sndlen + stcb->asoc.total_output_queue_size)) ||
10764 (stcb->asoc.chunks_on_out_queue >
10765 sctp_max_chunks_on_queue)) {
10766 error = EWOULDBLOCK;
10767 atomic_add_int(&stcb->sctp_ep->total_nospaces, 1);
10768 goto out_unlocked;
10769 }
10770 }
10771 /* Keep the stcb from being freed under our feet */
10772 atomic_add_int(&stcb->asoc.refcnt, 1);
10773 free_cnt_applied = 1;
10774
10775 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
10776 error = ECONNRESET;
10777 goto out_unlocked;
10778 }
10779 if (create_lock_applied) {
10780 SCTP_ASOC_CREATE_UNLOCK(inp);
10781 create_lock_applied = 0;
10782 }
10783 if (asoc->stream_reset_outstanding) {
10784 /*
10785 * Can't queue any data while stream reset is underway.
10786 */
10787 error = EAGAIN;
10788 goto out_unlocked;
10789 }
10790 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
10791 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
10792 queue_only = 1;
10793 }
10794 if ((use_rcvinfo == 0) || (srcv == NULL)) {
10795 /* Grab the default stuff from the asoc */
10796 srcv = &stcb->asoc.def_send;
10797 }
10798 /* we are now done with all control */
10799 if (control) {
10800 sctp_m_freem(control);
10801 control = NULL;
10802 }
10803 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
10804 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
10805 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
10806 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
10807 if ((use_rcvinfo) &&
10808 (srcv->sinfo_flags & SCTP_ABORT)) {
10809 ;
10810 } else {
10811 error = ECONNRESET;
10812 goto out_unlocked;
10813 }
10814 }
10815 /* Ok, we will attempt a msgsnd :> */
10816 if (p) {
10817 p->td_proc->p_stats->p_ru.ru_msgsnd++;
10818 }
10819 if (stcb) {
10820 if (net && ((srcv->sinfo_flags & SCTP_ADDR_OVER))) {
10821 /* we take the override or the unconfirmed */
10822 ;
10823 } else {
10824 net = stcb->asoc.primary_destination;
10825 }
10826 }
10827 if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) {
10828 /*
10829 * CMT: Added check for CMT above. net above is the primary
10830 * dest. If CMT is ON, sender should always attempt to send
10831 * with the output routine sctp_fill_outqueue() that loops
10832 * through all destination addresses. Therefore, if CMT is
10833 * ON, queue_only is NOT set to 1 here, so that
10834 * sctp_chunk_output() can be called below.
10835 */
10836 queue_only = 1;
10837
10838 } else if (asoc->ifp_had_enobuf) {
10839 SCTP_STAT_INCR(sctps_ifnomemqueued);
10840 if (net->flight_size > (net->mtu * 2))
10841 queue_only = 1;
10842 asoc->ifp_had_enobuf = 0;
10843 } else {
10844 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10845 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)));
10846 }
10847 /* Are we aborting? */
10848 if (srcv->sinfo_flags & SCTP_ABORT) {
10849 struct mbuf *mm;
10850 int tot_demand, tot_out, max;
10851
10852 SCTP_STAT_INCR(sctps_sends_with_abort);
10853 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
10854 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
10855 /* It has to be up before we abort */
10856 /* how big is the user initiated abort? */
10857 error = EINVAL;
10858 goto out;
10859 }
10860 if (hold_tcblock) {
10861 SCTP_TCB_UNLOCK(stcb);
10862 hold_tcblock = 0;
10863 }
10864 if (top) {
10865 struct mbuf *cntm;
10866
10867 mm = sctp_get_mbuf_for_msg(1, 0, M_WAIT, 1, MT_DATA);
10868
10869 tot_out = 0;
10870 cntm = top;
10871 while (cntm) {
10872 tot_out += SCTP_BUF_LEN(cntm);
10873 cntm = SCTP_BUF_NEXT(cntm);
10874 }
10875 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
10876 } else {
10877 /* Must fit in a MTU */
10878 tot_out = uio->uio_resid;
10879 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
10880 mm = sctp_get_mbuf_for_msg(tot_demand, 0, M_WAIT, 1, MT_DATA);
10881 }
10882 if (mm == NULL) {
10883 error = ENOMEM;
10884 goto out;
10885 }
10886 max = asoc->smallest_mtu - sizeof(struct sctp_paramhdr);
10887 max -= sizeof(struct sctp_abort_msg);
10888 if (tot_out > max) {
10889 tot_out = max;
10890 }
10891 if (mm) {
10892 struct sctp_paramhdr *ph;
10893
10894 /* now move forward the data pointer */
10895 ph = mtod(mm, struct sctp_paramhdr *);
10896 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
10897 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
10898 ph++;
10899 SCTP_BUF_LEN(mm) = tot_out + sizeof(struct sctp_paramhdr);
10900 if (top == NULL) {
10901 error = uiomove((caddr_t)ph, (int)tot_out, uio);
10902 if (error) {
10903 /*
10904 * Here if we can't get his data we
10905 * still abort we just don't get to
10906 * send the users note :-0
10907 */
10908 sctp_m_freem(mm);
10909 mm = NULL;
10910 }
10911 } else {
10912 SCTP_BUF_NEXT(mm) = top;
10913 }
10914 }
10915 if (hold_tcblock == 0) {
10916 SCTP_TCB_LOCK(stcb);
10917 hold_tcblock = 1;
10918 }
10919 atomic_add_int(&stcb->asoc.refcnt, -1);
10920 free_cnt_applied = 0;
10921 /* release this lock, otherwise we hang on ourselves */
10922 sctp_abort_an_association(stcb->sctp_ep, stcb,
10923 SCTP_RESPONSE_TO_USER_REQ,
10924 mm);
10925 /* now relock the stcb so everything is sane */
10926 hold_tcblock = 0;
10927 stcb = NULL;
10928 goto out_unlocked;
10929 }
10930 /* Calculate the maximum we can send */
10931 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) {
10932 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
10933 } else {
10934 max_len = 0;
10935 }
10936 if (hold_tcblock) {
10937 SCTP_TCB_UNLOCK(stcb);
10938 hold_tcblock = 0;
10939 }
10940 /* Is the stream no. valid? */
10941 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
10942 /* Invalid stream number */
10943 error = EINVAL;
10944 goto out_unlocked;
10945 }
10946 if (asoc->strmout == NULL) {
10947 /* huh? software error */
10948 error = EFAULT;
10949 goto out_unlocked;
10950 }
10951 len = 0;
10952 if (max_len < sctp_add_more_threshold) {
 10953 /* No room right now! */
10954 SOCKBUF_LOCK(&so->so_snd);
10955 while (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
10956#ifdef SCTP_BLK_LOGGING
10957 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLKA,
10958 so, asoc, uio->uio_resid);
10959#endif
10960 be.error = 0;
10961 stcb->block_entry = &be;
10962 error = sbwait(&so->so_snd);
10963 stcb->block_entry = NULL;
10964 if (error || so->so_error || be.error) {
10965 if (error == 0) {
10966 if (so->so_error)
10967 error = so->so_error;
10968 if (be.error) {
10969 error = be.error;
10970 }
10971 }
10972 SOCKBUF_UNLOCK(&so->so_snd);
10973 goto out_unlocked;
10974 }
10975#ifdef SCTP_BLK_LOGGING
10976 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
10977 so, asoc, stcb->asoc.total_output_queue_size);
10978#endif
10979 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
10980 goto out_unlocked;
10981 }
10982 }
10983 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size) {
10984 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
10985 } else {
10986 max_len = 0;
10987 }
10988 SOCKBUF_UNLOCK(&so->so_snd);
10989 }
10990 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
10991 goto out_unlocked;
10992 }
10993 atomic_add_int(&stcb->total_sends, 1);
10994 if (top == NULL) {
10995 struct sctp_stream_queue_pending *sp;
10996 struct sctp_stream_out *strm;
10997 uint32_t sndout, initial_out;
10998 int user_marks_eor;
10999
11000 if (uio->uio_resid == 0) {
11001 if (srcv->sinfo_flags & SCTP_EOF) {
11002 got_all_of_the_send = 1;
11003 goto dataless_eof;
11004 } else {
11005 error = EINVAL;
11006 goto out;
11007 }
11008 }
11009 initial_out = uio->uio_resid;
11010
11011 if ((asoc->stream_locked) &&
11012 (asoc->stream_locked_on != srcv->sinfo_stream)) {
11013 error = EAGAIN;
11014 goto out;
11015 }
11016 strm = &stcb->asoc.strmout[srcv->sinfo_stream];
11017 user_marks_eor = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
11018 if (strm->last_msg_incomplete == 0) {
11019 do_a_copy_in:
11020 sp = sctp_copy_it_in(stcb, asoc, srcv, uio, net, max_len, user_marks_eor, &error, non_blocking);
11021 if ((sp == NULL) || (error)) {
11022 goto out;
11023 }
11024 SCTP_TCB_SEND_LOCK(stcb);
11025 if (sp->msg_is_complete) {
11026 strm->last_msg_incomplete = 0;
11027 asoc->stream_locked = 0;
11028 } else {
11029 /*
11030 * Just got locked to this guy in case of an
 11031 * interrupt.
11032 */
11033 strm->last_msg_incomplete = 1;
11034 asoc->stream_locked = 1;
11035 asoc->stream_locked_on = srcv->sinfo_stream;
11036 }
11037 sctp_snd_sb_alloc(stcb, sp->length);
11038
11039 asoc->stream_queue_cnt++;
11040 TAILQ_INSERT_TAIL(&strm->outqueue, sp, next);
11041 if ((srcv->sinfo_flags & SCTP_UNORDERED) == 0) {
11042 sp->strseq = strm->next_sequence_sent;
11043#ifdef SCTP_LOG_SENDING_STR
11044 sctp_misc_ints(SCTP_STRMOUT_LOG_ASSIGN,
11045 (uintptr_t) stcb, (uintptr_t) sp,
11046 (uint32_t) ((srcv->sinfo_stream << 16) | sp->strseq), 0);
11047#endif
11048 strm->next_sequence_sent++;
11049 } else {
11050 SCTP_STAT_INCR(sctps_sends_with_unord);
11051 }
11052
11053 if ((strm->next_spoke.tqe_next == NULL) &&
11054 (strm->next_spoke.tqe_prev == NULL)) {
11055 /* Not on wheel, insert */
11056 sctp_insert_on_wheel(stcb, asoc, strm, 1);
11057 }
11058 SCTP_TCB_SEND_UNLOCK(stcb);
11059 } else {
11060 sp = TAILQ_LAST(&strm->outqueue, sctp_streamhead);
11061 if (sp == NULL) {
11062 /* ???? Huh ??? last msg is gone */
11063#ifdef INVARIANTS
11064 panic("Warning: Last msg marked incomplete, yet nothing left?");
11065#else
11066 printf("Warning: Last msg marked incomplete, yet nothing left?\n");
11067 strm->last_msg_incomplete = 0;
11068#endif
11069 goto do_a_copy_in;
11070
11071 }
11072 }
11073 while (uio->uio_resid > 0) {
11074 /* How much room do we have? */
11075 struct mbuf *new_tail, *mm;
11076
11077 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size)
11078 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
11079 else
11080 max_len = 0;
11081
11082 if ((max_len > sctp_add_more_threshold) ||
11083 (uio->uio_resid && (uio->uio_resid < max_len))) {
11084 sndout = 0;
11085 new_tail = NULL;
11086 if (hold_tcblock) {
11087 SCTP_TCB_UNLOCK(stcb);
11088 hold_tcblock = 0;
11089 }
11090 mm = sctp_copy_resume(sp, uio, srcv, max_len, user_marks_eor, &error, &sndout, &new_tail);
11091 if ((mm == NULL) || error) {
11092 if (mm) {
11093 sctp_m_freem(mm);
11094 }
11095 goto out;
11096 }
11097 /* Update the mbuf and count */
11098 SCTP_TCB_SEND_LOCK(stcb);
11099 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
11100 /*
11101 * we need to get out. Peer probably
11102 * aborted.
11103 */
11104 sctp_m_freem(mm);
11105 if (stcb->asoc.state & SCTP_PCB_FLAGS_WAS_ABORTED)
11106 error = ECONNRESET;
11107 goto out;
11108 }
11109 if (sp->tail_mbuf) {
11110 /* tack it to the end */
11111 SCTP_BUF_NEXT(sp->tail_mbuf) = mm;
11112 sp->tail_mbuf = new_tail;
11113 } else {
11114 /* A stolen mbuf */
11115 sp->data = mm;
11116 sp->tail_mbuf = new_tail;
11117 }
11118 sctp_snd_sb_alloc(stcb, sndout);
11119 sp->length += sndout;
11120 len += sndout;
11121 /* Did we reach EOR? */
11122 if ((uio->uio_resid == 0) &&
11123 ((user_marks_eor == 0) ||
11124 (user_marks_eor && (srcv->sinfo_flags & SCTP_EOR)))
11125 ) {
11126 sp->msg_is_complete = 1;
11127 } else {
11128 sp->msg_is_complete = 0;
11129 }
11130 SCTP_TCB_SEND_UNLOCK(stcb);
11131 }
11132 if (uio->uio_resid == 0) {
11133 /* got it all? */
11134 continue;
11135 }
11136 /* PR-SCTP? */
11137 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
11138 /*
11139 * This is ugly but we must assure locking
11140 * order
11141 */
11142 if (hold_tcblock == 0) {
11143 SCTP_TCB_LOCK(stcb);
11144 hold_tcblock = 1;
11145 }
11146 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
11147 if (so->so_snd.sb_hiwat > stcb->asoc.total_output_queue_size)
11148 max_len = so->so_snd.sb_hiwat - stcb->asoc.total_output_queue_size;
11149 else
11150 max_len = 0;
11151 if (max_len > 0) {
11152 continue;
11153 }
11154 SCTP_TCB_UNLOCK(stcb);
11155 hold_tcblock = 0;
11156 }
11157 /* wait for space now */
11158 if (non_blocking) {
11159 /* Non-blocking io in place out */
11160 goto skip_out_eof;
11161 }
11162 if ((net->flight_size > net->cwnd) &&
11163 (sctp_cmt_on_off == 0)) {
11164 queue_only = 1;
11165
11166 } else if (asoc->ifp_had_enobuf) {
11167 SCTP_STAT_INCR(sctps_ifnomemqueued);
11168 if (net->flight_size > (net->mtu * 2)) {
11169 queue_only = 1;
11170 } else {
11171 queue_only = 0;
11172 }
11173 asoc->ifp_had_enobuf = 0;
11174 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
11175 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
11176 sizeof(struct sctp_data_chunk)));
11177 } else {
11178 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
11179 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
11180 sizeof(struct sctp_data_chunk)));
11181 queue_only = 0;
11182 }
11183 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
11184 (stcb->asoc.total_flight > 0) &&
11185 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
11186 ) {
11187
11188 /*
11189 * Ok, Nagle is set on and we have data
11190 * outstanding. Don't send anything and let
 11191 * SACKs drive out the data unless we have
11192 * a "full" segment to send.
11193 */
11194#ifdef SCTP_NAGLE_LOGGING
11195 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
11196#endif
11197 SCTP_STAT_INCR(sctps_naglequeued);
11198 nagle_applies = 1;
11199 } else {
11200#ifdef SCTP_NAGLE_LOGGING
11201 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
11202 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
11203#endif
11204 SCTP_STAT_INCR(sctps_naglesent);
11205 nagle_applies = 0;
11206 }
11207 /* What about the INIT, send it maybe */
11208#ifdef SCTP_BLK_LOGGING
11209 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, queue_only_for_init, queue_only, nagle_applies, un_sent);
11210 sctp_misc_ints(SCTP_CWNDLOG_PRESEND, stcb->asoc.total_output_queue_size, stcb->asoc.total_flight,
11211 stcb->asoc.chunks_on_out_queue, stcb->asoc.total_flight_count);
11212#endif
11213 if (queue_only_for_init) {
11214 if (hold_tcblock == 0) {
11215 SCTP_TCB_LOCK(stcb);
11216 hold_tcblock = 1;
11217 }
11218 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
11219 /* a collision took us forward? */
11220 queue_only_for_init = 0;
11221 queue_only = 0;
11222 } else {
11223 sctp_send_initiate(inp, stcb);
11224 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
11225 queue_only_for_init = 0;
11226 queue_only = 1;
11227 }
11228 }
11229 if ((queue_only == 0) && (nagle_applies == 0)
11230 ) {
11231 /*
11232 * need to start chunk output before
11233 * blocking.. note that if a lock is already
11234 * applied, then the input via the net is
11235 * happening and I don't need to start
11236 * output :-D
11237 */
11238 if (hold_tcblock == 0) {
11239 if (SCTP_TCB_TRYLOCK(stcb)) {
11240 hold_tcblock = 1;
11241 sctp_chunk_output(inp,
11242 stcb,
11243 SCTP_OUTPUT_FROM_USR_SEND);
11244
11245 }
11246 } else {
11247 sctp_chunk_output(inp,
11248 stcb,
11249 SCTP_OUTPUT_FROM_USR_SEND);
11250 }
11251 if (hold_tcblock == 1) {
11252 SCTP_TCB_UNLOCK(stcb);
11253 hold_tcblock = 0;
11254 }
11255 }
11256 SOCKBUF_LOCK(&so->so_snd);
11257 /*
11258 * This is a bit strange, but I think it will work.
11259 * The total_output_queue_size is locked and
11260 * protected by the TCB_LOCK, which we just
11261 * released. There is a race that can occur between
11262 * releasing it above, and me getting the socket
11263 * lock, where sacks come in but we have not put the
11264 * SB_WAIT on the so_snd buffer to get the wakeup.
11265 * After the LOCK is applied the sack_processing
11266 * will also need to LOCK the so->so_snd to do the
11267 * actual sowwakeup(). So once we have the socket
11268 * buffer lock if we recheck the size we KNOW we
11269 * will get to sleep safely with the wakeup flag in
11270 * place.
11271 */
11272 if (so->so_snd.sb_hiwat < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
11273#ifdef SCTP_BLK_LOGGING
11274 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
11275 so, asoc, uio->uio_resid);
11276#endif
11277 be.error = 0;
11278 stcb->block_entry = &be;
11279 error = sbwait(&so->so_snd);
11280 stcb->block_entry = NULL;
11281
11282 if (error || so->so_error || be.error) {
11283 if (error == 0) {
11284 if (so->so_error)
11285 error = so->so_error;
11286 if (be.error) {
11287 error = be.error;
11288 }
11289 }
11290 SOCKBUF_UNLOCK(&so->so_snd);
11291 goto out_unlocked;
11292 }
11293#ifdef SCTP_BLK_LOGGING
11294 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
11295 so, asoc, stcb->asoc.total_output_queue_size);
11296#endif
11297 }
11298 SOCKBUF_UNLOCK(&so->so_snd);
11299 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
11300 goto out_unlocked;
11301 }
11302 }
11303 SCTP_TCB_SEND_LOCK(stcb);
11304 if (sp->msg_is_complete == 0) {
11305 strm->last_msg_incomplete = 1;
11306 asoc->stream_locked = 1;
11307 asoc->stream_locked_on = srcv->sinfo_stream;
11308 } else {
11309 strm->last_msg_incomplete = 0;
11310 asoc->stream_locked = 0;
11311 }
11312 SCTP_TCB_SEND_UNLOCK(stcb);
11313 if (uio->uio_resid == 0) {
11314 got_all_of_the_send = 1;
11315 }
11316 } else if (top) {
11317 /* We send in a 0, since we do NOT have any locks */
11318 error = sctp_msg_append(stcb, net, top, srcv, 0);
11319 top = NULL;
11320 }
11321 if (error) {
11322 goto out;
11323 }
11324dataless_eof:
11325 /* EOF thing ? */
11326 if ((srcv->sinfo_flags & SCTP_EOF) &&
11327 (got_all_of_the_send == 1) &&
11328 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)
11329 ) {
11330 SCTP_STAT_INCR(sctps_sends_with_eof);
11331 error = 0;
11332 if (hold_tcblock == 0) {
11333 SCTP_TCB_LOCK(stcb);
11334 hold_tcblock = 1;
11335 }
11336 if (TAILQ_EMPTY(&asoc->send_queue) &&
11337 TAILQ_EMPTY(&asoc->sent_queue) &&
11338 (asoc->stream_queue_cnt == 0)) {
11339 if (asoc->locked_on_sending) {
11340 goto abort_anyway;
11341 }
11342 /* there is nothing queued to send, so I'm done... */
11343 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
11344 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
11345 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
11346 /* only send SHUTDOWN the first time through */
11347 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
11348 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
11349 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
11350 }
11351 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
11352 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
11353 asoc->primary_destination);
11354 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
11355 asoc->primary_destination);
11356 }
11357 } else {
11358 /*
11359 * we still got (or just got) data to send, so set
11360 * SHUTDOWN_PENDING
11361 */
11362 /*
11363 * XXX sockets draft says that SCTP_EOF should be
11364 * sent with no data. currently, we will allow user
11365 * data to be sent first and move to
11366 * SHUTDOWN-PENDING
11367 */
11368 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
11369 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
11370 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
11371 if (hold_tcblock == 0) {
11372 SCTP_TCB_LOCK(stcb);
11373 hold_tcblock = 1;
11374 }
11375 if (asoc->locked_on_sending) {
11376 /* Locked to send out the data */
11377 struct sctp_stream_queue_pending *sp;
11378
11379 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
11380 if (sp) {
11381 if ((sp->length == 0) && (sp->msg_is_complete == 0))
11382 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
11383 }
11384 }
11385 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
11386 if (TAILQ_EMPTY(&asoc->send_queue) &&
11387 TAILQ_EMPTY(&asoc->sent_queue) &&
11388 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
11389 abort_anyway:
11390 if (free_cnt_applied) {
11391 atomic_add_int(&stcb->asoc.refcnt, -1);
11392 free_cnt_applied = 0;
11393 }
11394 sctp_abort_an_association(stcb->sctp_ep, stcb,
11395 SCTP_RESPONSE_TO_USER_REQ,
11396 NULL);
11397 /*
11398 * now relock the stcb so everything
11399 * is sane
11400 */
11401 hold_tcblock = 0;
11402 stcb = NULL;
11403 goto out;
11404 }
11405 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
11406 asoc->primary_destination);
11407 }
11408 }
11409 }
11410skip_out_eof:
11411 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
11412 some_on_control = 1;
11413 }
11414 if ((net->flight_size > net->cwnd) &&
11415 (sctp_cmt_on_off == 0)) {
11416 queue_only = 1;
11417 } else if (asoc->ifp_had_enobuf) {
11418 SCTP_STAT_INCR(sctps_ifnomemqueued);
11419 if (net->flight_size > (net->mtu * 2)) {
11420 queue_only = 1;
11421 } else {
11422 queue_only = 0;
11423 }
11424 asoc->ifp_had_enobuf = 0;
11425 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
11426 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
11427 sizeof(struct sctp_data_chunk)));
11428 } else {
11429 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
11430 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) *
11431 sizeof(struct sctp_data_chunk)));
11432 queue_only = 0;
11433 }
11434 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY)) &&
11435 (stcb->asoc.total_flight > 0) &&
11436 (un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
11437 ) {
11438
11439 /*
11440 * Ok, Nagle is set on and we have data outstanding. Don't
 11441	 * send anything and let SACKs drive out the data unless we
11442 * have a "full" segment to send.
11443 */
11444#ifdef SCTP_NAGLE_LOGGING
11445 sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
11446#endif
11447 SCTP_STAT_INCR(sctps_naglequeued);
11448 nagle_applies = 1;
11449 } else {
11450#ifdef SCTP_NAGLE_LOGGING
11451 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_NODELAY))
11452 sctp_log_nagle_event(stcb, SCTP_NAGLE_SKIPPED);
11453#endif
11454 SCTP_STAT_INCR(sctps_naglesent);
11455 nagle_applies = 0;
11456 }
11457 if (queue_only_for_init) {
11458 if (hold_tcblock == 0) {
11459 SCTP_TCB_LOCK(stcb);
11460 hold_tcblock = 1;
11461 }
11462 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) {
11463 /* a collision took us forward? */
11464 queue_only_for_init = 0;
11465 queue_only = 0;
11466 } else {
11467 sctp_send_initiate(inp, stcb);
11468 if (stcb->asoc.state & SCTP_STATE_SHUTDOWN_PENDING)
11469 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT |
11470 SCTP_STATE_SHUTDOWN_PENDING;
11471 else
11472 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
11473 queue_only_for_init = 0;
11474 queue_only = 1;
11475 }
11476 }
11477 if ((queue_only == 0) && (nagle_applies == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
11478 /* we can attempt to send too. */
11479 if (hold_tcblock == 0) {
11480 /*
11481 * If there is activity recv'ing sacks no need to
11482 * send
11483 */
11484 if (SCTP_TCB_TRYLOCK(stcb)) {
11485 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
11486 hold_tcblock = 1;
11487 }
11488 } else {
11489 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
11490 }
11491 } else if ((queue_only == 0) &&
11492 (stcb->asoc.peers_rwnd == 0) &&
11493 (stcb->asoc.total_flight == 0)) {
11494 /* We get to have a probe outstanding */
11495 if (hold_tcblock == 0) {
11496 hold_tcblock = 1;
11497 SCTP_TCB_LOCK(stcb);
11498 }
11499 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_USR_SEND);
11500 } else if (some_on_control) {
11501 int num_out, reason, cwnd_full, frag_point;
11502
11503 /* Here we do control only */
11504 if (hold_tcblock == 0) {
11505 hold_tcblock = 1;
11506 SCTP_TCB_LOCK(stcb);
11507 }
11508 frag_point = sctp_get_frag_point(stcb, &stcb->asoc);
11509 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
11510 &reason, 1, &cwnd_full, 1, &now, &now_filled, frag_point);
11511 }
11512#ifdef SCTP_DEBUG
11513 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
11514 printf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
11515 queue_only, stcb->asoc.peers_rwnd, un_sent,
11516 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
11517 stcb->asoc.total_output_queue_size);
11518 }
11519#endif
11520out:
11521out_unlocked:
11522
11523 if (create_lock_applied) {
11524 SCTP_ASOC_CREATE_UNLOCK(inp);
11525 create_lock_applied = 0;
11526 }
11527 if ((stcb) && hold_tcblock) {
11528 SCTP_TCB_UNLOCK(stcb);
11529 }
11530 if (stcb && free_cnt_applied) {
11531 atomic_add_int(&stcb->asoc.refcnt, -1);
11532 }
11533#ifdef INVARIANTS
11534 if (stcb) {
11535 if (mtx_owned(&stcb->tcb_mtx)) {
11536 panic("Leaving with tcb mtx owned?");
11537 }
11538 if (mtx_owned(&stcb->tcb_send_mtx)) {
11539 panic("Leaving with tcb send mtx owned?");
11540 }
11541 }
11542#endif
11543 if (top) {
11544 sctp_m_freem(top);
11545 }
11546 if (control) {
11547 sctp_m_freem(control);
11548 }
11549 return (error);
11550}
11551
11552
11553/*
11554 * generate an AUTHentication chunk, if required
11555 */
11556struct mbuf *
11557sctp_add_auth_chunk(struct mbuf *m, struct mbuf **m_end,
11558 struct sctp_auth_chunk **auth_ret, uint32_t * offset,
11559 struct sctp_tcb *stcb, uint8_t chunk)
11560{
11561 struct mbuf *m_auth;
11562 struct sctp_auth_chunk *auth;
11563 int chunk_len;
11564
11565 if ((m_end == NULL) || (auth_ret == NULL) || (offset == NULL) ||
11566 (stcb == NULL))
11567 return (m);
11568
11569 /* sysctl disabled auth? */
11570 if (sctp_auth_disable)
11571 return (m);
11572
11573 /* peer doesn't do auth... */
11574 if (!stcb->asoc.peer_supports_auth) {
11575 return (m);
11576 }
11577 /* does the requested chunk require auth? */
11578 if (!sctp_auth_is_required_chunk(chunk, stcb->asoc.peer_auth_chunks)) {
11579 return (m);
11580 }
11581 m_auth = sctp_get_mbuf_for_msg(sizeof(*auth), 0, M_DONTWAIT, 1, MT_HEADER);
11582 if (m_auth == NULL) {
11583 /* no mbuf's */
11584 return (m);
11585 }
11586 /* reserve some space if this will be the first mbuf */
11587 if (m == NULL)
11588 SCTP_BUF_RESV_UF(m_auth, SCTP_MIN_OVERHEAD);
11589 /* fill in the AUTH chunk details */
11590 auth = mtod(m_auth, struct sctp_auth_chunk *);
11591 bzero(auth, sizeof(*auth));
11592 auth->ch.chunk_type = SCTP_AUTHENTICATION;
11593 auth->ch.chunk_flags = 0;
11594 chunk_len = sizeof(*auth) +
11595 sctp_get_hmac_digest_len(stcb->asoc.peer_hmac_id);
11596 auth->ch.chunk_length = htons(chunk_len);
11597 auth->hmac_id = htons(stcb->asoc.peer_hmac_id);
11598 /* key id and hmac digest will be computed and filled in upon send */
11599
11600 /* save the offset where the auth was inserted into the chain */
11601 if (m != NULL) {
11602 struct mbuf *cn;
11603
11604 *offset = 0;
11605 cn = m;
11606 while (cn) {
11607 *offset += SCTP_BUF_LEN(cn);
11608 cn = SCTP_BUF_NEXT(cn);
11609 }
11610 } else
11611 *offset = 0;
11612
11613 /* update length and return pointer to the auth chunk */
11614 SCTP_BUF_LEN(m_auth) = chunk_len;
11615 m = sctp_copy_mbufchain(m_auth, m, m_end, 1, chunk_len, 0);
11616 if (auth_ret != NULL)
11617 *auth_ret = auth;
11618
11619 return (m);
11620}