sctp_structs.h: revision 163953 (old) vs. revision 163996 (new)
1/*-
2 * Copyright (c) 2001-2006, Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_structs.h,v 1.13 2005/03/06 16:04:18 itojun Exp $ */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_structs.h 163953 2006-11-03 15:23:16Z rrs $");
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_structs.h 163996 2006-11-05 13:25:18Z rrs $");
35
36#ifndef __sctp_structs_h__
37#define __sctp_structs_h__
38
39#include <sys/queue.h>
40
41#include <sys/callout.h>
42#include <sys/socket.h>
43
44#ifdef IPSEC
45#include <netinet6/ipsec.h>
46#include <netkey/key.h>
47#endif
48
49#include <netinet/sctp_header.h>
50#include <netinet/sctp_uio.h>
51#include <netinet/sctp_auth.h>
52
53struct sctp_timer {
54 struct callout timer;
55 int type;
56 /*
57 * Depending on the timer type these will be setup and cast with the
58 * appropriate entity.
59 */
60 void *ep;
61 void *tcb;
62 void *net;
63
64 /* for sanity checking */
65 void *self;
66 uint32_t ticks;
67};
68
69struct sctp_nonpad_sndrcvinfo {
70 uint16_t sinfo_stream;
71 uint16_t sinfo_ssn;
72 uint16_t sinfo_flags;
73 uint32_t sinfo_ppid;
74 uint32_t sinfo_context;
75 uint32_t sinfo_timetolive;
76 uint32_t sinfo_tsn;
77 uint32_t sinfo_cumtsn;
78 sctp_assoc_t sinfo_assoc_id;
79};
80
81
82/*
83 * This is the information we track on each interface that we know about from
84 * the distant end.
85 */
86TAILQ_HEAD(sctpnetlisthead, sctp_nets);
87
88struct sctp_stream_reset_list {
89 TAILQ_ENTRY(sctp_stream_reset_list) next_resp;
90 uint32_t tsn;
91 int number_entries;
92 struct sctp_stream_reset_out_request req;
93};
94
95TAILQ_HEAD(sctp_resethead, sctp_stream_reset_list);
96
97/*
98 * Users of the iterator need to malloc an iterator with a call to
99 * sctp_initiate_iterator(inp_func, assoc_func, pcb_flags, pcb_features,
100 * asoc_state, void-ptr-arg, uint32-arg, end_func, inp);
101 *
102 * Use the following two defines if you don't care what pcb flags are on the EP
103 * and/or you don't care what state the association is in.
104 *
105 * Note that if you specify an INP as the last argument then ONLY each
106 * association of that single INP will be executed upon. Note that the pcb
107 * flags STILL apply, so if the inp you specify has different pcb_flags than
108 * what you put in pcb_flags, nothing will happen. Use SCTP_PCB_ANY_FLAGS to
109 * assure the inp you specify gets treated.
110 */
111#define SCTP_PCB_ANY_FLAGS 0x00000000
112#define SCTP_PCB_ANY_FEATURES 0x00000000
113#define SCTP_ASOC_ANY_STATE 0x00000000
114
115typedef void (*asoc_func) (struct sctp_inpcb *, struct sctp_tcb *, void *ptr,
116 uint32_t val);
117typedef void (*inp_func) (struct sctp_inpcb *, void *ptr, uint32_t val);
118typedef void (*end_func) (void *ptr, uint32_t val);
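/*
 * Illustrative sketch (editor's addition, not part of this header): driving
 * the iterator described in the comment above.  The argument order follows
 * that comment; the actual prototype of sctp_initiate_iterator() is declared
 * elsewhere in the stack (and may carry extra arguments), and every name
 * starting with "example_" is hypothetical.
 */
#if 0
static void
example_per_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, uint32_t val)
{
	/* called once for every association of a matching endpoint */
}

static void
example_per_inp(struct sctp_inpcb *inp, void *ptr, uint32_t val)
{
	/* called once per matching endpoint */
}

static void
example_done(void *ptr, uint32_t val)
{
	/* called when the walk completes */
}

static void
example_walk_everything(void)
{
	/* no filtering: any pcb flags, any features, any association state */
	sctp_initiate_iterator(example_per_inp, example_per_assoc,
	    SCTP_PCB_ANY_FLAGS, SCTP_PCB_ANY_FEATURES, SCTP_ASOC_ANY_STATE,
	    NULL, 0, example_done,
	    NULL);	/* NULL inp is assumed here to mean "all endpoints" */
}
#endif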
119
120struct sctp_iterator {
121 LIST_ENTRY(sctp_iterator) sctp_nxt_itr;
122 struct sctp_timer tmr;
123 struct sctp_inpcb *inp; /* current endpoint */
124 struct sctp_tcb *stcb; /* current* assoc */
125 asoc_func function_assoc; /* per assoc function */
126 inp_func function_inp; /* per endpoint function */
127 end_func function_atend;/* iterator completion function */
128 void *pointer; /* pointer for apply func to use */
129 uint32_t val; /* value for apply func to use */
130 uint32_t pcb_flags; /* endpoint flags being checked */
131 uint32_t pcb_features; /* endpoint features being checked */
132 uint32_t asoc_state; /* assoc state being checked */
133 uint32_t iterator_flags;
134 uint8_t no_chunk_output;
135};
136
137/* iterator_flags values */
138#define SCTP_ITERATOR_DO_ALL_INP 0x00000001
139#define SCTP_ITERATOR_DO_SINGLE_INP 0x00000002
140
141LIST_HEAD(sctpiterators, sctp_iterator);
142
143struct sctp_copy_all {
144 struct sctp_inpcb *inp; /* ep */
145 struct mbuf *m;
146 struct sctp_sndrcvinfo sndrcv;
147 int sndlen;
148 int cnt_sent;
149 int cnt_failed;
150};
151
152union sctp_sockstore {
153#ifdef AF_INET
154 struct sockaddr_in sin;
155#endif
156#ifdef AF_INET6
157 struct sockaddr_in6 sin6;
158#endif
159 struct sockaddr sa;
160};
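/*
 * Illustrative sketch (editor's addition): sctp_sockstore lets one variable
 * carry either an IPv4 or an IPv6 address, with sa.sa_family saying which
 * member is valid.  The helper name is hypothetical.
 */
#if 0
static uint16_t
example_sockstore_port(const union sctp_sockstore *store)
{
	if (store->sa.sa_family == AF_INET)
		return (ntohs(store->sin.sin_port));
	if (store->sa.sa_family == AF_INET6)
		return (ntohs(store->sin6.sin6_port));
	return (0);
}
#endif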
161
162struct sctp_nets {
163 TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */
164
165 /*
166 * Things on the top half may be able to be split into a common
167 * structure shared by all.
168 */
169 struct sctp_timer pmtu_timer;
170
171 /*
172 * The following two in combination equate to a route entry for v6
173 * or v4.
174 */
175 struct sctp_route {
176 struct rtentry *ro_rt;
177 union sctp_sockstore _l_addr; /* remote peer addr */
178 union sctp_sockstore _s_addr; /* our selected src addr */
179 } ro;
180 /* mtu discovered so far */
181 uint32_t mtu;
182 uint32_t ssthresh; /* not sure about this one for split */
183
184 /* smoothed average things for RTT and RTO itself */
185 int lastsa;
186 int lastsv;
187 unsigned int RTO;
188
189 /* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
190 struct sctp_timer rxt_timer;
191 struct sctp_timer fr_timer; /* for early fr */
192
193 /* last time in seconds I sent to it */
194 struct timeval last_sent_time;
195 int ref_count;
196
197 /* Congestion stats per destination */
198 /*
199 * flight size variables and such, sorry Vern, I could not avoid
200 * this if I wanted performance :>
201 */
202 uint32_t flight_size;
203 uint32_t cwnd; /* actual cwnd */
204 uint32_t prev_cwnd; /* cwnd before any processing */
205 uint32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */
206 uint32_t rtt_variance;
207 uint32_t prev_rtt;
208 /* tracking variables to avoid the alloc/free in sack processing */
209 unsigned int net_ack;
210 unsigned int net_ack2;
211
212 /*
213 * CMT variables (iyengar@cis.udel.edu)
214 */
215 uint32_t this_sack_highest_newack; /* tracks highest TSN newly
216 * acked for a given dest in
217 * the current SACK. Used in
218 * SFR and HTNA algos */
219 uint32_t pseudo_cumack; /* CMT CUC algorithm. Maintains next expected
220 * pseudo-cumack for this destination */
221 uint32_t rtx_pseudo_cumack; /* CMT CUC algorithm. Maintains next
222 * expected pseudo-cumack for this
223 * destination */
224
225 /* CMT fast recovery variables */
226 uint32_t fast_recovery_tsn;
227 uint32_t heartbeat_random1;
228 uint32_t heartbeat_random2;
229 uint32_t tos_flowlabel;
230
231 /* if this guy is ok or not ... status */
232 uint16_t dest_state;
233 /* number of transmit failures to down this guy */
234 uint16_t failure_threshold;
235 /* error stats on destination */
236 uint16_t error_count;
237
238 uint8_t fast_retran_loss_recovery;
239 uint8_t will_exit_fast_recovery;
240 /* Flags that probably can be combined into dest_state */
241 uint8_t rto_variance_dir; /* increase = 1, decreasing = 0 */
242 uint8_t rto_pending; /* is segment marked for RTO update ** if we
243 * split? */
244 uint8_t fast_retran_ip; /* fast retransmit in progress */
245 uint8_t hb_responded;
246 uint8_t saw_newack; /* CMT's SFR algorithm flag */
247 uint8_t src_addr_selected; /* if we split we move */
248 uint8_t indx_of_eligible_next_to_use;
249 uint8_t addr_is_local; /* its a local address (if known) could move
250 * in split */
251
252 /*
253 * CMT variables (iyengar@cis.udel.edu)
254 */
255 uint8_t find_pseudo_cumack; /* CMT CUC algorithm. Flag used to
256 * find a new pseudocumack. This flag
257 * is set after a new pseudo-cumack
258 * has been received and indicates
259 * that the sender should find the
260 * next pseudo-cumack expected for
261 * this destination */
262 uint8_t find_rtx_pseudo_cumack; /* CMT CUCv2 algorithm. Flag used to
263 * find a new rtx-pseudocumack. This
264 * flag is set after a new
265 * rtx-pseudo-cumack has been received
266 * and indicates that the sender
267 * should find the next
268 * rtx-pseudo-cumack expected for this
269 * destination */
270 uint8_t new_pseudo_cumack; /* CMT CUC algorithm. Flag used to
271 * indicate if a new pseudo-cumack or
272 * rtx-pseudo-cumack has been received */
273#ifdef SCTP_HIGH_SPEED
274 uint8_t last_hs_used; /* index into the last HS table entry we used */
275#endif
276};
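/*
 * Illustrative sketch (editor's addition): lastsa/lastsv above hold the
 * smoothed RTT estimate and its variance that feed RTO.  The update rule
 * below is the generic one from RFC 2960 section 6.3.1 (alpha = 1/8,
 * beta = 1/4, RTO = SRTT + 4 * RTTVAR); the kernel keeps lastsa/lastsv in a
 * scaled fixed-point form, so this shows the idea rather than the exact
 * arithmetic used by this implementation, and the helper name is
 * hypothetical.
 */
#if 0
static unsigned int
example_rto_update(int *srtt, int *rttvar, int measured_rtt)
{
	int delta = measured_rtt - *srtt;

	if (delta < 0)
		delta = -delta;
	*rttvar += (delta - *rttvar) / 4;	/* RTTVAR += beta * (|SRTT - R'| - RTTVAR) */
	*srtt += (measured_rtt - *srtt) / 8;	/* SRTT  += alpha * (R' - SRTT) */
	/* clamping to RTO.Min/RTO.Max would happen elsewhere */
	return ((unsigned int)(*srtt + 4 * *rttvar));
}
#endif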
277
278
279struct sctp_data_chunkrec {
280 uint32_t TSN_seq; /* the TSN of this transmit */
281 uint16_t stream_seq; /* the stream sequence number of this transmit */
282 uint16_t stream_number; /* the stream number of this guy */
283 uint32_t payloadtype;
284 uint32_t context; /* from send */
285
286 /* ECN Nonce: Nonce Value for this chunk */
287 uint8_t ect_nonce;
288
289 /*
290 * part of the Highest sacked algorithm to be able to stroke counts
291 * on ones that are FR'd.
292 */
293 uint32_t fast_retran_tsn; /* sending_seq at the time of FR */
294 struct timeval timetodrop; /* time we drop it from queue */
295 uint8_t doing_fast_retransmit;
296 uint8_t rcv_flags; /* flags pulled from data chunk on inbound for
297 * outbound holds sending flags. */
298 uint8_t state_flags;
299 uint8_t chunk_was_revoked;
300};
301
302TAILQ_HEAD(sctpchunk_listhead, sctp_tmit_chunk);
303
304/* The lower byte is used to enumerate PR_SCTP policies */
305#define CHUNK_FLAGS_PR_SCTP_TTL SCTP_PR_SCTP_TTL
306#define CHUNK_FLAGS_PR_SCTP_BUF SCTP_PR_SCTP_BUF
307#define CHUNK_FLAGS_PR_SCTP_RTX SCTP_PR_SCTP_RTX
308
309 /* The upper byte is used as a bit mask */
310#define CHUNK_FLAGS_FRAGMENT_OK 0x0100
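/*
 * Illustrative sketch (editor's addition): per the two comments above, the
 * low byte of the flags word names one PR-SCTP policy and the high byte is
 * a bit mask.  The 0x00ff split and the helper name are editorial; only the
 * macros themselves come from this header and the includes above.
 */
#if 0
static int
example_chunk_flags(uint16_t flags)
{
	uint16_t policy = flags & 0x00ff;	/* e.g. CHUNK_FLAGS_PR_SCTP_TTL */
	int frag_ok = (flags & CHUNK_FLAGS_FRAGMENT_OK) != 0;

	return (policy != 0 && frag_ok);
}
#endif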
311
312struct chk_id {
313 uint16_t id;
314 uint16_t can_take_data;
315};
316
317
318struct sctp_tmit_chunk {
319 union {
320 struct sctp_data_chunkrec data;
321 struct chk_id chunk_id;
322 } rec;
323 struct sctp_association *asoc; /* bp to asoc this belongs to */
324 struct timeval sent_rcv_time; /* filled in if RTT being calculated */
325 struct mbuf *data; /* pointer to mbuf chain of data */
326 struct mbuf *last_mbuf; /* pointer to last mbuf in chain */
327 struct sctp_nets *whoTo;
328 TAILQ_ENTRY(sctp_tmit_chunk) sctp_next; /* next link */
329 int32_t sent; /* the send status */
330 uint16_t snd_count; /* number of times I sent */
331 uint16_t flags; /* flags, such as FRAGMENT_OK */
332 uint16_t send_size;
333 uint16_t book_size;
334 uint16_t mbcnt;
335 uint8_t pad_inplace;
336 uint8_t do_rtt;
337 uint8_t book_size_scale;
338 uint8_t addr_over; /* flag which is set if the dest address for
339 * this chunk is overridden by user. Used for
340 * CMT (iyengar@cis.udel.edu, 2005/06/21) */
341 uint8_t no_fr_allowed;
342 uint8_t pr_sctp_on;
343 uint8_t copy_by_ref;
344};
345
346/*
347 * The first part of this structure MUST be the entire sinfo structure. Maybe
348 * I should have made it a sub structure... we can circle back later and do
349 * that if we want.
350 */
351struct sctp_queued_to_read { /* sinfo structure Plus more */
352 uint16_t sinfo_stream; /* off the wire */
353 uint16_t sinfo_ssn; /* off the wire */
354 uint16_t sinfo_flags; /* SCTP_UNORDERED from wire use SCTP_EOF for
355 * EOR */
356 uint32_t sinfo_ppid; /* off the wire */
357 uint32_t sinfo_context; /* pick this up from assoc def context? */
358 uint32_t sinfo_timetolive; /* not used by kernel */
359 uint32_t sinfo_tsn; /* Use this in reassembly as first TSN */
360 uint32_t sinfo_cumtsn; /* Use this in reassembly as last TSN */
361 sctp_assoc_t sinfo_assoc_id; /* our assoc id */
362 /* Non sinfo stuff */
363 uint32_t length; /* length of data */
364 uint32_t held_length; /* length held in sb */
365 struct sctp_nets *whoFrom; /* where it came from */
366 struct mbuf *data; /* front of the mbuf chain of data with
367 * PKT_HDR */
368 struct mbuf *tail_mbuf; /* used for multi-part data */
369 struct sctp_tcb *stcb; /* assoc, used for window update */
370 TAILQ_ENTRY(sctp_queued_to_read) next;
371 uint16_t port_from;
372 uint8_t do_not_ref_stcb;
373 uint8_t end_added;
374};
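/*
 * Illustrative sketch (editor's addition): because the comment above requires
 * the sinfo fields to come first, in sinfo order, a read-queue entry can be
 * handed to code that expects a plain struct sctp_sndrcvinfo by a simple
 * pointer cast.  The helper name is hypothetical.
 */
#if 0
static struct sctp_sndrcvinfo *
example_control_to_sinfo(struct sctp_queued_to_read *control)
{
	/* valid only as long as the leading members keep mirroring sctp_sndrcvinfo */
	return ((struct sctp_sndrcvinfo *)control);
}
#endif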
375
376/* This data structure will be on the outbound
377 * stream queues. Data will be pulled off from
378 * the front of the mbuf data and chunk-ified
379 * by the output routines. We will custom
380 * fit every chunk we pull to the send/sent
381 * queue to make up the next full packet
382 * if we can. An entry cannot be removed
383 * from the stream_out queue until
384 * the msg_is_complete flag is set. This
385 * means at times data/tail_mbuf MIGHT
386 * be NULL.. If that occurs it happens
387 * for one of two reasons. Either the user
388 * is blocked on a send() call and has not
389 * awoken to copy more data down... OR
390 * the user is in the explicit MSG_EOR mode
391 * and wrote some data, but has not completed
392 * sending.
393 */
394struct sctp_stream_queue_pending {
395 struct mbuf *data;
396 struct mbuf *tail_mbuf;
397 struct timeval ts;
398 struct sctp_nets *net;
399 TAILQ_ENTRY(sctp_stream_queue_pending) next;
400 uint32_t length;
401 uint32_t timetolive;
402 uint32_t ppid;
403 uint32_t context;
404 uint16_t sinfo_flags;
405 uint16_t stream;
406 uint16_t strseq;
407 uint8_t msg_is_complete;
408 uint8_t some_taken;
409 uint8_t addr_over;
410 uint8_t act_flags;
411 uint8_t pr_sctp_on;
412 uint8_t resv;
413};
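/*
 * Illustrative sketch (editor's addition): the block comment above says an
 * entry may only leave a stream_out queue once msg_is_complete is set, and
 * that data/tail_mbuf can legitimately still be NULL while the sender fills
 * the entry in.  A check honouring that rule might look like this; the
 * helper name is hypothetical and locking is omitted.
 */
#if 0
static int
example_can_dequeue(struct sctp_stream_queue_pending *sp)
{
	/*
	 * Output may chunk-ify from the front of sp->data at any time
	 * (some_taken), but the entry itself stays on the stream_out queue
	 * until the whole message has been copied down.
	 */
	return (sp->msg_is_complete != 0);
}
#endif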
414
415/*
416 * this struct contains info that is used to track inbound stream data and
417 * help with ordering.
418 */
419TAILQ_HEAD(sctpwheelunrel_listhead, sctp_stream_in);
420struct sctp_stream_in {
421 struct sctp_readhead inqueue;
422 TAILQ_ENTRY(sctp_stream_in) next_spoke;
423 uint16_t stream_no;
424 uint16_t last_sequence_delivered; /* used for re-order */
425};
426
427/* This struct is used to track the traffic on outbound streams */
428TAILQ_HEAD(sctpwheel_listhead, sctp_stream_out);
429struct sctp_stream_out {
430 struct sctp_streamhead outqueue;
431 TAILQ_ENTRY(sctp_stream_out) next_spoke; /* next link in wheel */
432 uint16_t stream_no;
433 uint16_t next_sequence_sent; /* next one I expect to send out */
434 uint8_t last_msg_incomplete;
435};
436
437/* used to keep track of the addresses yet to try to add/delete */
438TAILQ_HEAD(sctp_asconf_addrhead, sctp_asconf_addr);
439struct sctp_asconf_addr {
440 TAILQ_ENTRY(sctp_asconf_addr) next;
441 struct sctp_asconf_addr_param ap;
442 struct ifaddr *ifa; /* save the ifa for add/del ip */
443 uint8_t sent; /* has this been sent yet? */
444};
445
446struct sctp_scoping {
447 uint8_t ipv4_addr_legal;
448 uint8_t ipv6_addr_legal;
449 uint8_t loopback_scope;
450 uint8_t ipv4_local_scope;
451 uint8_t local_scope;
452 uint8_t site_scope;
453};
454
455/*
456 * Here we have information about each individual association that we track.
457 * We probably in production would be more dynamic. But for ease of
458 * implementation we will have a fixed array that we hunt for in a linear
459 * fashion.
460 */
461struct sctp_association {
462 /* association state */
463 int state;
464 /* queue of pending addrs to add/delete */
465 struct sctp_asconf_addrhead asconf_queue;
466 struct timeval time_entered; /* time we entered state */
467 struct timeval time_last_rcvd;
468 struct timeval time_last_sent;
469 struct timeval time_last_sat_advance;
470 struct sctp_sndrcvinfo def_send; /* default send parameters */
471
472 /* timers and such */
473 struct sctp_timer hb_timer; /* hb timer */
474 struct sctp_timer dack_timer; /* Delayed ack timer */
475 struct sctp_timer asconf_timer; /* Asconf */
476 struct sctp_timer strreset_timer; /* stream reset */
477 struct sctp_timer shut_guard_timer; /* guard */
478 struct sctp_timer autoclose_timer; /* automatic close timer */
479 struct sctp_timer delayed_event_timer; /* timer for delayed events */
480
481 /* list of local addresses when add/del in progress */
482 struct sctpladdr sctp_local_addr_list;
483 struct sctpnetlisthead nets;
484
485 /* Free chunk list */
486 struct sctpchunk_listhead free_chunks;
487
488 /* Free stream output control list */
489 struct sctp_streamhead free_strmoq;
490
491 /* Control chunk queue */
492 struct sctpchunk_listhead control_send_queue;
493
494 /*
495 * Once a TSN hits the wire it is moved to the sent_queue. We
496 * maintain two counts here (don't know if any but retran_cnt is
497 * needed). The idea is that the sent_queue_retran_cnt reflects how
499 * many chunks have been marked for retransmission by either T3-rxt
499 * or FR.
500 */
501 struct sctpchunk_listhead sent_queue;
502 struct sctpchunk_listhead send_queue;
503
504
505 /* re-assembly queue for fragmented chunks on the inbound path */
506 struct sctpchunk_listhead reasmqueue;
507
508 /*
509 * this queue is used when we reach a condition that we can NOT put
510 * data into the socket buffer. We track the size of this queue and
511 * set our rwnd to the space in the socket minus also the
512 * size_on_delivery_queue.
513 */
514 struct sctpwheel_listhead out_wheel;
515
516 /*
517 * This pointer will be set to NULL most of the time. But when we
518 * have a fragmented message, where we could not get out all of the
519 * message at the last send then this will point to the stream to go
520 * get data from.
521 */
522 struct sctp_stream_out *locked_on_sending;
523
524 /* If an iterator is looking at me, this is it */
525 struct sctp_iterator *stcb_starting_point_for_iterator;
526
527 /* ASCONF destination address last sent to */
528/* struct sctp_nets *asconf_last_sent_to;*/
529/* Peter, grepping for the above shows only one strange set.
530 * I don't think we need it, so I have commented it out.
531 */
532
533 /* ASCONF save the last ASCONF-ACK so we can resend it if necessary */
534 struct mbuf *last_asconf_ack_sent;
535
536 /*
537 * pointer to last stream reset queued to control queue by us with
538 * requests.
539 */
540 struct sctp_tmit_chunk *str_reset;
541 /*
542 * if Source Address Selection is happening, this will rotate through
543 * the linked list.
544 */
545 struct sctp_laddr *last_used_address;
546
547 /* stream arrays */
548 struct sctp_stream_in *strmin;
549 struct sctp_stream_out *strmout;
550 uint8_t *mapping_array;
551 /* primary destination to use */
552 struct sctp_nets *primary_destination;
553 /* For CMT */
554 struct sctp_nets *last_net_data_came_from;
555 /* last place I got a data chunk from */
556 struct sctp_nets *last_data_chunk_from;
557 /* last place I got a control from */
558 struct sctp_nets *last_control_chunk_from;
559
560 /* circular looking for output selection */
561 struct sctp_stream_out *last_out_stream;
562
563 /*
564 * wait to the point the cum-ack passes req->send_reset_at_tsn for
565 * any req on the list.
566 */
567 struct sctp_resethead resetHead;
568
569 /* queue of chunks waiting to be sent into the local stack */
570 struct sctp_readhead pending_reply_queue;
571
572 uint32_t cookie_preserve_req;
573 /* ASCONF next seq I am sending out, inits at init-tsn */
574 uint32_t asconf_seq_out;
575 /* ASCONF last received ASCONF from peer, starts at peer's TSN-1 */
576 uint32_t asconf_seq_in;
577
578 /* next seq I am sending in str reset messages */
579 uint32_t str_reset_seq_out;
580
581 /* next seq I am expecting in str reset messages */
582 uint32_t str_reset_seq_in;
583
584
585 /* various verification tag information */
586 uint32_t my_vtag; /* The tag to be used. If the assoc is re-initiated
587 * by the remote end, and I have unlocked, this
588 * will be regenerated to a new random value. */
589 uint32_t peer_vtag; /* The peers last tag */
590
591 uint32_t my_vtag_nonce;
592 uint32_t peer_vtag_nonce;
593
594 uint32_t assoc_id;
595
596 /* This is the SCTP fragmentation threshold */
597 uint32_t smallest_mtu;
598
599 /*
600 * Special hook for Fast retransmit, allows us to track the highest
601 * TSN that is NEW in this SACK if gap ack blocks are present.
602 */
603 uint32_t this_sack_highest_gap;
604
605 /*
606 * The highest consecutive TSN that has been acked by peer on my
607 * sends
608 */
609 uint32_t last_acked_seq;
610
611 /* The next TSN that I will use in sending. */
612 uint32_t sending_seq;
613
614 /* Original seq number I used ??questionable to keep?? */
615 uint32_t init_seq_number;
616
617
618 /* The Advanced Peer Ack Point, as required by the PR-SCTP */
619 /* (A1 in Section 4.2) */
620 uint32_t advanced_peer_ack_point;
621
622 /*
623 * The highest consecutive TSN at the bottom of the mapping array
624 * (for his sends).
625 */
626 uint32_t cumulative_tsn;
627 /*
628 * Used to track the mapping array and its offset bits. This MAY be
629 * lower than cumulative_tsn.
630 */
631 uint32_t mapping_array_base_tsn;
632 /*
633 * used to track highest TSN we have received and is listed in the
634 * mapping array.
635 */
636 uint32_t highest_tsn_inside_map;
637
638 uint32_t last_echo_tsn;
639 uint32_t last_cwr_tsn;
640 uint32_t fast_recovery_tsn;
641 uint32_t sat_t3_recovery_tsn;
642 uint32_t tsn_last_delivered;
643 /*
644 * For the pd-api we should rewrite this a bit more efficiently. We
645 * could have multiple sctp_queued_to_read's that we are building at
646 * once. Now we only do this when we get ready to deliver to the
647 * socket buffer. Note that we depend on the fact that the struct is
648 * "stuck" on the read queue until we finish all the pd-api.
649 */
650 struct sctp_queued_to_read *control_pdapi;
651
652 uint32_t tsn_of_pdapi_last_delivered;
653 uint32_t pdapi_ppid;
654 uint32_t context;
655 uint32_t last_reset_action[SCTP_MAX_RESET_PARAMS];
656 uint32_t last_sending_seq[SCTP_MAX_RESET_PARAMS];
657 uint32_t last_base_tsnsent[SCTP_MAX_RESET_PARAMS];
658 /*
659 * window state information and smallest MTU that I use to bound
660 * segmentation
661 */
662 uint32_t peers_rwnd;
663 uint32_t my_rwnd;
664 uint32_t my_last_reported_rwnd;
665 uint32_t my_rwnd_control_len;
666
667 uint32_t total_output_queue_size;
668
669 uint32_t sb_cc; /* shadow of sb_cc in one-2-one */
670 uint32_t sb_mbcnt; /* shadow of sb_mbcnt in one-2-one */
671 /* 32 bit nonce stuff */
672 uint32_t nonce_resync_tsn;
673 uint32_t nonce_wait_tsn;
674 uint32_t default_flowlabel;
675 uint32_t pr_sctp_cnt;
676 int ctrl_queue_cnt; /* could be removed REM */
677 /*
678 * All outbound datagrams queue into this list from the individual
679 * stream queue. Here they get assigned a TSN and then await
680 * sending. The stream seq comes when it is first put in the
681 * individual str queue
682 */
683 unsigned int stream_queue_cnt;
684 unsigned int send_queue_cnt;
685 unsigned int sent_queue_cnt;
686 unsigned int sent_queue_cnt_removeable;
687 /*
688 * Number on sent queue that are marked for retran until this value
689 * is 0 we only send one packet of retran'ed data.
690 */
691 unsigned int sent_queue_retran_cnt;
692
693 unsigned int size_on_reasm_queue;
694 unsigned int cnt_on_reasm_queue;
695 /* amount of data (bytes) currently in flight (on all destinations) */
696 unsigned int total_flight;
697 /* Total book size in flight */
698 unsigned int total_flight_count; /* count of chunks used with
699 * book total */
700 /* count of destination nets and list of destination nets */
701 unsigned int numnets;
702
703 /* Total error count on this association */
704 unsigned int overall_error_count;
705
706 unsigned int cnt_msg_on_sb;
707
708 /* All stream count of chunks for delivery */
709 unsigned int size_on_all_streams;
710 unsigned int cnt_on_all_streams;
711
712 /* Heart Beat delay in ticks */
713 unsigned int heart_beat_delay;
714
715 /* autoclose */
716 unsigned int sctp_autoclose_ticks;
717
718 /* how many preopen streams we have */
719 unsigned int pre_open_streams;
720
721 /* How many streams I support coming into me */
722 unsigned int max_inbound_streams;
723
724 /* the cookie life I award for any cookie, in seconds */
725 unsigned int cookie_life;
726 /* time to delay acks for */
727 unsigned int delayed_ack;
728
729 unsigned int numduptsns;
730 int dup_tsns[SCTP_MAX_DUP_TSNS];
731 unsigned int initial_init_rto_max; /* initial RTO for INIT's */
732 unsigned int initial_rto; /* initial send RTO */
733 unsigned int minrto; /* per assoc RTO-MIN */
734 unsigned int maxrto; /* per assoc RTO-MAX */
735
736 /* authentication fields */
737 sctp_auth_chklist_t *local_auth_chunks;
738 sctp_auth_chklist_t *peer_auth_chunks;
739 sctp_hmaclist_t *local_hmacs; /* local HMACs supported */
740 sctp_hmaclist_t *peer_hmacs; /* peer HMACs supported */
741 struct sctp_keyhead shared_keys; /* assoc's shared keys */
742 sctp_authinfo_t authinfo; /* randoms, cached keys */
(revision 163953: removed)
	/*
	 * refcnt to block freeing when a sender or receiver is off copying
	 * user data in.
	 */
	uint16_t refcnt;

	uint16_t peer_hmac_id;	/* peer HMAC id to send */

(revision 163996: added; refcnt and chunks_on_out_queue are widened to
uint32_t and grouped together, and peer_hmac_id moves ahead of the comment)
	uint16_t peer_hmac_id;	/* peer HMAC id to send */
	/*
	 * refcnt to block freeing when a sender or receiver is off copying
	 * user data in.
	 */
	uint32_t refcnt;
	uint32_t chunks_on_out_queue;	/* total chunks floating around,
					 * locked by send socket buffer */

(unchanged in both revisions)
	/*
	 * Being that we have no bag to collect stale cookies, and that we
	 * really would not want to anyway.. we will count them in this
	 * counter. We of course feed them to the pigeons right away (I have
	 * always thought of pigeons as flying rats).
	 */
	uint16_t stale_cookie_count;

	/*
	 * For the partial delivery API, if up, invoked this is what last
	 * TSN I delivered
	 */
	uint16_t str_of_pdapi;
	uint16_t ssn_of_pdapi;

	/* counts of actual built streams. Allocation may be more however */
	/* could re-arrange to optimize space here. */
	uint16_t streamincnt;
	uint16_t streamoutcnt;

	/* my maximum number of retrans of INIT and SEND */
	/* copied from SCTP but should be individually settable */
	uint16_t max_init_times;
	uint16_t max_send_times;

	uint16_t def_net_failure;

	/*
	 * lock flag: 0 is ok to send, 1+ (duals as a retran count) is
	 * awaiting ACK
	 */
	uint16_t asconf_sent;	/* possibly removable REM */
	uint16_t mapping_array_size;

	uint16_t last_strm_seq_delivered;
	uint16_t last_strm_no_delivered;

(revision 163953: removed; superseded by the uint32_t declaration grouped
with refcnt above)
	uint16_t chunks_on_out_queue;	/* total chunks floating around,
					 * locked by send socket buffer */
789 uint16_t last_revoke_count;
790 int16_t num_send_timers_up;
791
792 uint16_t stream_locked_on;
793 uint16_t ecn_echo_cnt_onq;
794
795 uint16_t free_chunk_cnt;
796 uint16_t free_strmoq_cnt;
797
798 uint8_t stream_locked;
799 uint8_t authenticated; /* packet authenticated ok */
800 /*
801 * This flag indicates that we need to send the first SACK. If in
802 * place it says we have NOT yet sent a SACK and need to.
803 */
804 uint8_t first_ack_sent;
805
806 /* max burst after fast retransmit completes */
807 uint8_t max_burst;
808
809 uint8_t sat_network; /* RTT is in range of sat net or greater */
810 uint8_t sat_network_lockout; /* lockout code */
811 uint8_t burst_limit_applied; /* Burst limit in effect at last send? */
812 /* flag goes on when we are doing a partial delivery api */
813 uint8_t hb_random_values[4];
814 uint8_t fragmented_delivery_inprogress;
815 uint8_t fragment_flags;
816 uint8_t last_flags_delivered;
817 uint8_t hb_ect_randombit;
818 uint8_t hb_random_idx;
819 uint8_t hb_is_disabled; /* is the hb disabled? */
820 uint8_t default_tos;
821
822 /* ECN Nonce stuff */
823 uint8_t receiver_nonce_sum; /* nonce I sum and put in my sack */
824 uint8_t ecn_nonce_allowed; /* Tells us if ECN nonce is on */
825 uint8_t nonce_sum_check;/* On off switch used during re-sync */
826 uint8_t nonce_wait_for_ecne; /* flag when we expect a ECN */
827 uint8_t peer_supports_ecn_nonce;
828
829 /*
830 * This value, plus all other ack'd but above cum-ack is added
831 * together to cross check against the bit that we have yet to
832 * define (probably in the SACK). When the cum-ack is updated, this
833 * sum is updated as well.
834 */
835 uint8_t nonce_sum_expect_base;
836 /* Flag to tell if ECN is allowed */
837 uint8_t ecn_allowed;
838
839 /* flag to indicate if peer can do asconf */
840 uint8_t peer_supports_asconf;
841 /* pr-sctp support flag */
842 uint8_t peer_supports_prsctp;
843 /* peer authentication support flag */
844 uint8_t peer_supports_auth;
845 /* stream resets are supported by the peer */
846 uint8_t peer_supports_strreset;
847
848 /*
849 * packet drops are supported by the peer; we don't really care
850 * about this but we bookkeep it anyway.
851 */
852 uint8_t peer_supports_pktdrop;
853
854 /* Do we allow V6/V4? */
855 uint8_t ipv4_addr_legal;
856 uint8_t ipv6_addr_legal;
857 /* Address scoping flags */
858 /* scope value for IPv4 */
859 uint8_t ipv4_local_scope;
860 /* scope values for IPv6 */
861 uint8_t local_scope;
862 uint8_t site_scope;
863 /* loopback scope */
864 uint8_t loopback_scope;
865 /* flags to handle send alternate net tracking */
866 uint8_t used_alt_onsack;
867 uint8_t used_alt_asconfack;
868 uint8_t fast_retran_loss_recovery;
869 uint8_t sat_t3_loss_recovery;
870 uint8_t dropped_special_cnt;
871 uint8_t seen_a_sack_this_pkt;
872 uint8_t stream_reset_outstanding;
873 uint8_t stream_reset_out_is_outstanding;
874 uint8_t delayed_connection;
875 uint8_t ifp_had_enobuf;
876 uint8_t saw_sack_with_frags;
877 uint8_t in_restart_hash;
878 uint8_t assoc_up_sent;
879 /* CMT variables */
880 uint8_t cmt_dac_pkts_rcvd;
881 uint8_t sctp_cmt_on_off;
882 uint8_t iam_blocking;
883 /*
884 * The mapping array is used to track out of order sequences above
885 * last_acked_seq. 0 indicates packet missing, 1 indicates packet
886 * rec'd. We slide it up every time we raise last_acked_seq and zero
887 * trailing locations out. If I get a TSN above the array
888 * mappingArraySz, I discard the datagram and let retransmit happen.
889 */
890};
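/*
 * Illustrative sketch (editor's addition): the closing comment above
 * describes mapping_array as one bit per TSN at or above
 * mapping_array_base_tsn (1 = received, 0 = missing).  A lookup under that
 * reading could look like this; the helper name, the LSB-first bit order
 * within each byte, and the absence of TSN wrap-around handling are all
 * editorial simplifications.
 */
#if 0
static int
example_tsn_seen(struct sctp_association *asoc, uint32_t tsn)
{
	uint32_t gap = tsn - asoc->mapping_array_base_tsn;

	if (gap >= ((uint32_t)asoc->mapping_array_size << 3))
		return (0);	/* outside the window the array currently covers */
	return ((asoc->mapping_array[gap >> 3] >> (gap & 0x07)) & 0x01);
}
#endif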
891
892#endif