/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_output.c	8.4 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/tcp_output.c 262763 2014-03-05 01:17:47Z glebius $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/domain.h>
#include <sys/hhook.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#define	TCPOUTFLAGS
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

VNET_DEFINE(int, path_mtu_discovery) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
    &VNET_NAME(path_mtu_discovery), 1,
    "Enable Path MTU Discovery");

VNET_DEFINE(int, tcp_do_tso) = 1;
#define	V_tcp_do_tso		VNET(tcp_do_tso)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, tso, CTLFLAG_RW,
    &VNET_NAME(tcp_do_tso), 0,
    "Enable TCP Segmentation Offload");

VNET_DEFINE(int, tcp_sendspace) = 1024*32;
#define	V_tcp_sendspace	VNET(tcp_sendspace)
SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLFLAG_RW,
    &VNET_NAME(tcp_sendspace), 0, "Initial send socket buffer size");

VNET_DEFINE(int, tcp_do_autosndbuf) = 1;
#define	V_tcp_do_autosndbuf	VNET(tcp_do_autosndbuf)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_RW,
    &VNET_NAME(tcp_do_autosndbuf), 0,
    "Enable automatic send buffer sizing");

VNET_DEFINE(int, tcp_autosndbuf_inc) = 8*1024;
#define	V_tcp_autosndbuf_inc	VNET(tcp_autosndbuf_inc)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_RW,
    &VNET_NAME(tcp_autosndbuf_inc), 0,
    "Incrementor step size of automatic send buffer");

VNET_DEFINE(int, tcp_autosndbuf_max) = 2*1024*1024;
#define	V_tcp_autosndbuf_max	VNET(tcp_autosndbuf_max)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_RW,
    &VNET_NAME(tcp_autosndbuf_max), 0,
    "Max size of automatic send buffer");
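
/*
 * Informative note: the VNET_DEFINE()/SYSCTL_VNET_INT() pairs above export
 * per-VNET run-time tunables under the net.inet.tcp sysctl tree, e.g.:
 *
 *	sysctl net.inet.tcp.path_mtu_discovery=1
 *	sysctl net.inet.tcp.sendbuf_auto=1
 *	sysctl net.inet.tcp.sendbuf_inc=8192
 *	sysctl net.inet.tcp.sendbuf_max=2097152
 *
 * (The values shown here are the defaults defined above.)
 */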

static void inline	hhook_run_tcp_est_out(struct tcpcb *tp,
			    struct tcphdr *th, struct tcpopt *to,
			    long len, int tso);
static void inline	cc_after_idle(struct tcpcb *tp);

/*
 * Wrapper for the TCP established output helper hook.
 */
static void inline
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
    struct tcpopt *to, long len, int tso)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_OUT]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;
		hhook_data.len = len;
		hhook_data.tso = tso;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_OUT], &hhook_data,
		    tp->osd);
	}
}

/*
 * CC wrapper hook functions
 */
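/*
 * Informative note: with the default NewReno module the after_idle hook
 * restarts the congestion window after an idle period in the spirit of
 * RFC 5681, Section 4.1; other congestion control modules may implement
 * different behaviour, or none at all.
 */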
static void inline
cc_after_idle(struct tcpcb *tp)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->after_idle != NULL)
		CC_ALGO(tp)->after_idle(tp->ccv);
}

/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;
	long len, recwin, sendwin;
	int off, flags, error = 0;	/* Keep compiler happy */
	struct mbuf *m;
	struct ip *ip = NULL;
	struct ipovly *ipov = NULL;
	struct tcphdr *th;
	u_char opt[TCP_MAXOLEN];
	unsigned ipoptlen, optlen, hdrlen;
#ifdef IPSEC
	unsigned ipsec_optlen = 0;
#endif
	int idle, sendalot;
	int sack_rxmit, sack_bytes_rxmt;
	struct sackhole *p;
	int tso, mtu;
	struct tcpopt to;
#if 0
	int maxburst = TCP_MAXBURST;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;

	isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0;
#endif

	INP_WLOCK_ASSERT(tp->t_inpcb);

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE)
		return (tcp_offload_output(tp));
#endif

	/*
	 * Determine length of data that should be transmitted,
	 * and flags that will be used.
	 * If there is some data or critical controls (SYN, RST)
	 * to send, then transmit; otherwise, investigate further.
	 */
	idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
	if (idle && ticks - tp->t_rcvtime >= tp->t_rxtcur)
		cc_after_idle(tp);
	tp->t_flags &= ~TF_LASTIDLE;
	if (idle) {
		if (tp->t_flags & TF_MORETOCOME) {
			tp->t_flags |= TF_LASTIDLE;
			idle = 0;
		}
	}
again:
	/*
	 * If we've recently taken a timeout, snd_max will be greater than
	 * snd_nxt.  There may be SACK information that allows us to avoid
	 * resending already delivered data.  Adjust snd_nxt accordingly.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_LT(tp->snd_nxt, tp->snd_max))
		tcp_sack_adjust(tp);
	sendalot = 0;
	tso = 0;
	mtu = 0;
	off = tp->snd_nxt - tp->snd_una;
	sendwin = min(tp->snd_wnd, tp->snd_cwnd);

	flags = tcp_outflags[tp->t_state];
	/*
	 * Send any SACK-generated retransmissions.  If we're explicitly trying
	 * to send out new data (when sendalot is 1), bypass this function.
	 * If we retransmit in fast recovery mode, decrement snd_cwnd, since
	 * we're replacing a (future) new transmission with a retransmission
	 * now, and we previously incremented snd_cwnd in tcp_input().
	 */
	/*
	 * Still in SACK recovery; reset the rxmit flag to zero.
	 */
	sack_rxmit = 0;
	sack_bytes_rxmt = 0;
	len = 0;
	p = NULL;
	if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp->t_flags) &&
	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
		long cwin;

		cwin = min(tp->snd_wnd, tp->snd_cwnd) - sack_bytes_rxmt;
		if (cwin < 0)
			cwin = 0;
		/* Do not retransmit SACK segments beyond snd_recover */
		if (SEQ_GT(p->end, tp->snd_recover)) {
			/*
			 * (At least) part of sack hole extends beyond
			 * snd_recover.  Check to see if we can rexmit data
			 * for this hole.
			 */
			if (SEQ_GEQ(p->rxmit, tp->snd_recover)) {
				/*
				 * Can't rexmit any more data for this hole.
				 * That data will be rexmitted in the next
				 * sack recovery episode, when snd_recover
				 * moves past p->rxmit.
				 */
				p = NULL;
				goto after_sack_rexmit;
			} else
				/* Can rexmit part of the current hole */
				len = ((long)ulmin(cwin,
				    tp->snd_recover - p->rxmit));
		} else
			len = ((long)ulmin(cwin, p->end - p->rxmit));
		off = p->rxmit - tp->snd_una;
		KASSERT(off >= 0, ("%s: sack block to the left of una : %d",
		    __func__, off));
		if (len > 0) {
			sack_rxmit = 1;
			sendalot = 1;
			TCPSTAT_INC(tcps_sack_rexmits);
			TCPSTAT_ADD(tcps_sack_rexmit_bytes,
			    min(len, tp->t_maxseg));
		}
	}
after_sack_rexmit:
	/*
	 * Get standard flags, and add SYN or FIN if requested by 'hidden'
	 * state flags.
	 */
	if (tp->t_flags & TF_NEEDFIN)
		flags |= TH_FIN;
	if (tp->t_flags & TF_NEEDSYN)
		flags |= TH_SYN;

	SOCKBUF_LOCK(&so->so_snd);
	/*
	 * If in persist timeout with window of 0, send 1 byte.
	 * Otherwise, if window is small but nonzero
	 * and timer expired, we will send what we can
	 * and go to transmit state.
	 */
	if (tp->t_flags & TF_FORCEDATA) {
		if (sendwin == 0) {
			/*
			 * If we still have some data to send, then
			 * clear the FIN bit.  Usually this would
			 * happen below when it realizes that we
			 * aren't sending all the data.  However,
			 * if we have exactly 1 byte of unsent data,
			 * then it won't clear the FIN bit below,
			 * and if we are in persist state, we wind
			 * up sending the packet without recording
			 * that we sent the FIN bit.
			 *
			 * We can't just blindly clear the FIN bit,
			 * because if we don't have any more data
			 * to send then the probe will be the FIN
			 * itself.
			 */
			if (off < so->so_snd.sb_cc)
				flags &= ~TH_FIN;
			sendwin = 1;
		} else {
			tcp_timer_activate(tp, TT_PERSIST, 0);
			tp->t_rxtshift = 0;
		}
	}

	/*
	 * If snd_nxt == snd_max and we have transmitted a FIN, the
	 * offset will be > 0 even if so_snd.sb_cc is 0, resulting in
	 * a negative length.  This can also occur when TCP opens up
	 * its congestion window while receiving additional duplicate
	 * acks after fast-retransmit because TCP will reset snd_nxt
	 * to snd_max after the fast-retransmit.
	 *
	 * In the normal retransmit-FIN-only case, however, snd_nxt will
	 * be set to snd_una, the offset will be 0, and the length may
	 * wind up 0.
	 *
	 * If sack_rxmit is true we are retransmitting from the scoreboard
	 * in which case len is already set.
	 */
	if (sack_rxmit == 0) {
		if (sack_bytes_rxmt == 0)
			len = ((long)ulmin(so->so_snd.sb_cc, sendwin) - off);
		else {
			long cwin;

			/*
			 * We are inside of a SACK recovery episode and are
			 * sending new data, having retransmitted all the
			 * data possible in the scoreboard.
			 */
			len = ((long)ulmin(so->so_snd.sb_cc, tp->snd_wnd)
			    - off);
			/*
			 * Don't remove this (len > 0) check !
			 * We explicitly check for len > 0 here (although it
			 * isn't really necessary), to work around a gcc
			 * optimization issue - to force gcc to compute
			 * len above.  Without this check, the computation
			 * of len is bungled by the optimizer.
			 */
			if (len > 0) {
				cwin = tp->snd_cwnd -
				    (tp->snd_nxt - tp->sack_newdata) -
				    sack_bytes_rxmt;
				if (cwin < 0)
					cwin = 0;
				len = lmin(len, cwin);
			}
		}
	}

	/*
	 * Lop off SYN bit if it has already been sent.  However, if this
	 * is SYN-SENT state and if segment contains data and if we don't
	 * know that foreign host supports TAO, suppress sending segment.
	 */
	if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
		if (tp->t_state != TCPS_SYN_RECEIVED)
			flags &= ~TH_SYN;
		off--, len++;
	}

	/*
	 * Be careful not to send data and/or FIN on SYN segments.
	 * This measure is needed to prevent interoperability problems
	 * with not fully conformant TCP implementations.
	 */
	if ((flags & TH_SYN) && (tp->t_flags & TF_NOOPT)) {
		len = 0;
		flags &= ~TH_FIN;
	}

	if (len < 0) {
		/*
		 * If FIN has been sent but not acked,
		 * but we haven't been called to retransmit,
		 * len will be < 0.  Otherwise, window shrank
		 * after we sent into it.  If window shrank to 0,
		 * cancel pending retransmit, pull snd_nxt back
		 * to (closed) window, and set the persist timer
		 * if it isn't already going.  If the window didn't
		 * close completely, just wait for an ACK.
		 */
		len = 0;
		if (sendwin == 0) {
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_rxtshift = 0;
			tp->snd_nxt = tp->snd_una;
			if (!tcp_timer_active(tp, TT_PERSIST))
				tcp_setpersist(tp);
		}
	}

	/* len will be >= 0 after this point. */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Automatic sizing of send socket buffer.  Often the send buffer
	 * size is not optimally adjusted to the actual network conditions
	 * at hand (delay bandwidth product).  Setting the buffer size too
	 * small limits throughput on links with high bandwidth and high
	 * delay (e.g. trans-continental/oceanic links).  Setting the
	 * buffer size too big consumes too much real kernel memory,
	 * especially with many connections on busy servers.
	 *
	 * The criteria to step up the send buffer one notch are:
	 *  1. receive window of remote host is larger than send buffer
	 *     (with a fudge factor of 5/4th);
	 *  2. send buffer is filled to 7/8th with data (so we actually
	 *     have data to make use of it);
	 *  3. send buffer fill has not hit maximal automatic size;
	 *  4. our send window (slow start and congestion controlled) is
	 *     larger than sent but unacknowledged data in send buffer.
	 *
	 * The remote host receive window scaling factor may limit the
	 * growing of the send buffer before it reaches its allowed
	 * maximum.
	 *
	 * It scales directly with slow start or congestion window
	 * and does at most one step per received ACK.  This fast
	 * scaling has the drawback of growing the send buffer beyond
	 * what is strictly necessary to make full use of a given
	 * delay*bandwidth product.  However, testing has shown this not
	 * to be much of a problem.  At worst we are trading the waste of
	 * available bandwidth (its non-use) for the waste of some socket
	 * buffer memory.
	 *
	 * TODO: Shrink send buffer during idle periods together
	 * with congestion window.  Requires another timer.  Has to
	 * wait for upcoming tcp timer rewrite.
	 */
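	/*
	 * Worked example (illustrative numbers only): with sb_hiwat at
	 * 32768 bytes, the buffer is grown by sendbuf_inc once the peer
	 * advertises a window of at least ~26 KB (4/5 of sb_hiwat), at
	 * least 28672 bytes (7/8 of sb_hiwat) are queued, the buffer is
	 * still below sendbuf_max, and the send window covers all data
	 * currently in flight.
	 */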
	if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
		if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
		    so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
		    so->so_snd.sb_cc < V_tcp_autosndbuf_max &&
		    sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
			if (!sbreserve_locked(&so->so_snd,
			    min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
			     V_tcp_autosndbuf_max), so, curthread))
				so->so_snd.sb_flags &= ~SB_AUTOSIZE;
		}
	}

	/*
	 * Decide if we can use TCP Segmentation Offloading (if supported by
	 * hardware).
	 *
	 * TSO may only be used if we are in a pure bulk sending state.  The
	 * presence of TCP-MD5, SACK retransmits, SACK advertisements and
	 * IP options prevent using TSO.  With TSO the TCP header is the same
	 * (except for the sequence number) for all generated packets.  This
	 * makes it impossible to transmit any options which vary per generated
	 * segment or packet.
	 */
#ifdef IPSEC
	/*
	 * Pre-calculate here as we save another lookup into the darknesses
	 * of IPsec that way and can actually decide if TSO is ok.
	 */
	ipsec_optlen = ipsec_hdrsiz_tcp(tp);
#endif
	if ((tp->t_flags & TF_TSO) && V_tcp_do_tso && len > tp->t_maxseg &&
	    ((tp->t_flags & TF_SIGNATURE) == 0) &&
	    tp->rcv_numsacks == 0 && sack_rxmit == 0 &&
#ifdef IPSEC
	    ipsec_optlen == 0 &&
#endif
	    tp->t_inpcb->inp_options == NULL &&
	    tp->t_inpcb->in6p_options == NULL)
		tso = 1;

	if (sack_rxmit) {
		if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
			flags &= ~TH_FIN;
	} else {
		if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
			flags &= ~TH_FIN;
	}

	recwin = sbspace(&so->so_rcv);

	/*
	 * Sender silly window avoidance.  We transmit under the following
	 * conditions when len is non-zero:
	 *
	 *	- We have a full segment (or more with TSO)
	 *	- This is the last buffer in a write()/send() and we are
	 *	  either idle or running NODELAY
	 *	- we've timed out (e.g. persist timer)
	 *	- we have more than 1/2 the maximum send window's worth of
	 *	  data (the receiver may be limited by the window size)
	 *	- we need to retransmit
	 */
	if (len) {
		if (len >= tp->t_maxseg)
			goto send;
		/*
		 * NOTE! on localhost connections an 'ack' from the remote
		 * end may occur synchronously with the output and cause
		 * us to flush a buffer queued with moretocome.  XXX
		 *
		 * note: the len + off check is almost certainly unnecessary.
		 */
		if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
		    (idle || (tp->t_flags & TF_NODELAY)) &&
		    len + off >= so->so_snd.sb_cc &&
		    (tp->t_flags & TF_NOPUSH) == 0) {
			goto send;
		}
		if (tp->t_flags & TF_FORCEDATA)		/* typ. timeout case */
			goto send;
		if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
			goto send;
		if (SEQ_LT(tp->snd_nxt, tp->snd_max))	/* retransmit case */
			goto send;
		if (sack_rxmit)
			goto send;
	}

	/*
	 * Sending of standalone window updates.
	 *
	 * Window updates are important when we close our window due to a
	 * full socket buffer and are opening it again after the application
	 * reads data from it.  Once the window has opened again and the
	 * remote end starts to send again, the ACK clock takes over and
	 * provides the most current window information.
	 *
	 * We must avoid the silly window syndrome whereby every read
	 * from the receive buffer, no matter how small, causes a window
	 * update to be sent.  We also should avoid sending a flurry of
	 * window updates when the socket buffer had queued a lot of data
	 * and the application is doing small reads.
	 *
	 * Prevent a flurry of pointless window updates by only sending
	 * an update when we can increase the advertised window by more
	 * than 1/4th of the socket buffer capacity.  When the buffer is
	 * getting full or is very small be more aggressive and send an
	 * update whenever we can increase by two mss sized segments.
	 * In all other situations the ACK's to new incoming data will
	 * carry further window increases.
	 *
	 * Don't send an independent window update if a delayed
	 * ACK is pending (it will get piggy-backed on it) or the
	 * remote side already has done a half-close and won't send
	 * more data.  Skip this if the connection is in T/TCP
	 * half-open state.
	 */
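	/*
	 * Worked example (illustrative): with so_rcv.sb_hiwat = 65536 and
	 * t_maxseg = 1460, a standalone update goes out once the window can
	 * grow by 16384 bytes (sb_hiwat / 4); when the buffer is nearly
	 * full (recwin <= 8192, i.e. sb_hiwat / 8) the threshold drops to
	 * two segments, 2920 bytes.
	 */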
	if (recwin > 0 && !(tp->t_flags & TF_NEEDSYN) &&
	    !(tp->t_flags & TF_DELACK) &&
	    !TCPS_HAVERCVDFIN(tp->t_state)) {
		/*
		 * "adv" is the amount we could increase the window,
		 * taking into account that we are limited by
		 * TCP_MAXWIN << tp->rcv_scale.
		 */
		long adv;
		int oldwin;

		adv = min(recwin, (long)TCP_MAXWIN << tp->rcv_scale);
		if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) {
			oldwin = (tp->rcv_adv - tp->rcv_nxt);
			adv -= oldwin;
		} else
			oldwin = 0;

		/*
		 * If the new window size ends up being the same as the old
		 * size when it is scaled, then don't force a window update.
		 */
		if (oldwin >> tp->rcv_scale == (adv + oldwin) >> tp->rcv_scale)
			goto dontupdate;

		if (adv >= (long)(2 * tp->t_maxseg) &&
		    (adv >= (long)(so->so_rcv.sb_hiwat / 4) ||
		     recwin <= (long)(so->so_rcv.sb_hiwat / 8) ||
		     so->so_rcv.sb_hiwat <= 8 * tp->t_maxseg))
			goto send;
	}
dontupdate:

	/*
	 * Send if we owe the peer an ACK, RST, SYN, or urgent data.  ACKNOW
	 * is also a catch-all for the retransmit timer timeout case.
	 */
	if (tp->t_flags & TF_ACKNOW)
		goto send;
	if ((flags & TH_RST) ||
	    ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0))
		goto send;
	if (SEQ_GT(tp->snd_up, tp->snd_una))
		goto send;
	/*
	 * If our state indicates that FIN should be sent
	 * and we have not yet done so, then we need to send.
	 */
	if (flags & TH_FIN &&
	    ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
		goto send;
	/*
	 * In SACK, it is possible for tcp_output to fail to send a segment
	 * after the retransmission timer has been turned off.  Make sure
	 * that the retransmission timer is set.
	 */
	if ((tp->t_flags & TF_SACK_PERMIT) &&
	    SEQ_GT(tp->snd_max, tp->snd_una) &&
	    !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
		goto just_return;
	}
	/*
	 * TCP window updates are not reliable; rather, a polling protocol
	 * using ``persist'' packets is used to ensure receipt of window
	 * updates.  The three ``states'' for the output side are:
	 *	idle			not doing retransmits or persists
	 *	persisting		to move a small or zero window
	 *	(re)transmitting	and thereby not persisting
	 *
	 * tcp_timer_active(tp, TT_PERSIST)
	 *	is true when we are in persist state.
	 * (tp->t_flags & TF_FORCEDATA)
	 *	is set when we are called to send a persist packet.
	 * tcp_timer_active(tp, TT_REXMT)
	 *	is set when we are retransmitting
	 * The output side is idle when both timers are zero.
	 *
	 * If send window is too small, there is data to transmit, and no
	 * retransmit or persist is pending, then go to persist state.
	 * If nothing happens soon, send when timer expires:
	 * if window is nonzero, transmit what we can,
	 * otherwise force out a byte.
	 */
	if (so->so_snd.sb_cc && !tcp_timer_active(tp, TT_REXMT) &&
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tp->t_rxtshift = 0;
		tcp_setpersist(tp);
	}

	/*
	 * No reason to send a segment, just return.
	 */
just_return:
	SOCKBUF_UNLOCK(&so->so_snd);
	return (0);

send:
	SOCKBUF_LOCK_ASSERT(&so->so_snd);
	/*
	 * Before ESTABLISHED, force sending of initial options
	 * unless TCP set not to do any options.
	 * NOTE: we assume that the IP/TCP header plus TCP options
	 * always fit in a single mbuf, leaving room for a maximum
	 * link header, i.e.
	 *	max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MCLBYTES
	 */
	optlen = 0;
#ifdef INET6
	if (isipv6)
		hdrlen = sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	else
#endif
		hdrlen = sizeof (struct tcpiphdr);

	/*
	 * Compute options for segment.
	 * We only have to care about SYN and established connection
	 * segments.  Options for SYN-ACK segments are handled in TCP
	 * syncache.
	 */
	if ((tp->t_flags & TF_NOOPT) == 0) {
		to.to_flags = 0;
		/* Maximum segment size. */
		if (flags & TH_SYN) {
			tp->snd_nxt = tp->iss;
			to.to_mss = tcp_mssopt(&tp->t_inpcb->inp_inc);
			to.to_flags |= TOF_MSS;
		}
		/* Window scaling. */
		if ((flags & TH_SYN) && (tp->t_flags & TF_REQ_SCALE)) {
			to.to_wscale = tp->request_r_scale;
			to.to_flags |= TOF_SCALE;
		}
		/* Timestamps. */
		if ((tp->t_flags & TF_RCVD_TSTMP) ||
		    ((flags & TH_SYN) && (tp->t_flags & TF_REQ_TSTMP))) {
			to.to_tsval = tcp_ts_getticks() + tp->ts_offset;
			to.to_tsecr = tp->ts_recent;
			to.to_flags |= TOF_TS;
			/* Set receive buffer autosizing timestamp. */
			if (tp->rfbuf_ts == 0 &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE))
				tp->rfbuf_ts = tcp_ts_getticks();
		}
		/* Selective ACK's. */
		if (tp->t_flags & TF_SACK_PERMIT) {
			if (flags & TH_SYN)
				to.to_flags |= TOF_SACKPERM;
			else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
			    (tp->t_flags & TF_SACK_PERMIT) &&
			    tp->rcv_numsacks > 0) {
				to.to_flags |= TOF_SACK;
				to.to_nsacks = tp->rcv_numsacks;
				to.to_sacks = (u_char *)tp->sackblks;
			}
		}
#ifdef TCP_SIGNATURE
		/* TCP-MD5 (RFC2385). */
		if (tp->t_flags & TF_SIGNATURE)
			to.to_flags |= TOF_SIGNATURE;
#endif /* TCP_SIGNATURE */

		/* Processing the options. */
		hdrlen += optlen = tcp_addoptions(&to, opt);
	}

#ifdef INET6
	if (isipv6)
		ipoptlen = ip6_optlen(tp->t_inpcb);
	else
#endif
	if (tp->t_inpcb->inp_options)
		ipoptlen = tp->t_inpcb->inp_options->m_len -
		    offsetof(struct ipoption, ipopt_list);
	else
		ipoptlen = 0;
#ifdef IPSEC
	ipoptlen += ipsec_optlen;
#endif

	/*
	 * Adjust data length if insertion of options will
	 * bump the packet length beyond the t_maxopd length.
	 * Clear the FIN bit because we cut off the tail of
	 * the segment.
	 */
	if (len + optlen + ipoptlen > tp->t_maxopd) {
		flags &= ~TH_FIN;

		if (tso) {
			KASSERT(ipoptlen == 0,
			    ("%s: TSO can't do IP options", __func__));

			/*
			 * Limit a burst to t_tsomax minus IP,
			 * TCP and options length to keep ip->ip_len
			 * from overflowing or exceeding the maximum
			 * length allowed by the network interface.
			 */
			if (len > tp->t_tsomax - hdrlen) {
				len = tp->t_tsomax - hdrlen;
				sendalot = 1;
			}

			/*
			 * Prevent the last segment from being
			 * fractional unless the send sockbuf can
			 * be emptied.
			 */
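			/*
			 * Worked example (illustrative): with t_maxopd = 1460
			 * and optlen = 12, each full segment carries 1448
			 * payload bytes; a 10000 byte burst is trimmed below
			 * to 8688 bytes (six full segments) and the 1312 byte
			 * tail is left for the next pass.
			 */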
			if (sendalot && off + len < so->so_snd.sb_cc) {
				len -= len % (tp->t_maxopd - optlen);
				sendalot = 1;
			}

			/*
			 * Send the FIN in a separate segment
			 * after the bulk sending is done.
			 * We don't trust the TSO implementations
			 * to clear the FIN flag on all but the
			 * last segment.
			 */
			if (tp->t_flags & TF_NEEDFIN)
				sendalot = 1;

		} else {
			len = tp->t_maxopd - optlen - ipoptlen;
			sendalot = 1;
		}
	} else
		tso = 0;

	KASSERT(len + hdrlen + ipoptlen <= IP_MAXPACKET,
	    ("%s: len > IP_MAXPACKET", __func__));

/*#ifdef DIAGNOSTIC*/
#ifdef INET6
	if (max_linkhdr + hdrlen > MCLBYTES)
#else
	if (max_linkhdr + hdrlen > MHLEN)
#endif
		panic("tcphdr too big");
/*#endif*/

	/*
	 * This KASSERT is here to catch edge cases at a well defined place.
	 * Before, those had triggered (random) panic conditions further down.
	 */
	KASSERT(len >= 0, ("[%s:%d]: len < 0", __func__, __LINE__));

	/*
	 * Grab a header mbuf, attaching a copy of data to
	 * be transmitted, and initialize the header from
	 * the template for sends on this connection.
	 */
	if (len) {
		struct mbuf *mb;
		u_int moff;

		if ((tp->t_flags & TF_FORCEDATA) && len == 1)
			TCPSTAT_INC(tcps_sndprobe);
		else if (SEQ_LT(tp->snd_nxt, tp->snd_max) || sack_rxmit) {
			tp->t_sndrexmitpack++;
			TCPSTAT_INC(tcps_sndrexmitpack);
			TCPSTAT_ADD(tcps_sndrexmitbyte, len);
		} else {
			TCPSTAT_INC(tcps_sndpack);
			TCPSTAT_ADD(tcps_sndbyte, len);
		}
#ifdef INET6
		if (MHLEN < hdrlen + max_linkhdr)
			m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		else
#endif
			m = m_gethdr(M_NOWAIT, MT_DATA);

		if (m == NULL) {
			SOCKBUF_UNLOCK(&so->so_snd);
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}

		m->m_data += max_linkhdr;
		m->m_len = hdrlen;

		/*
		 * Start the m_copy functions from the closest mbuf
		 * to the offset in the socket buffer chain.
		 */
		mb = sbsndptr(&so->so_snd, off, len, &moff);

		if (len <= MHLEN - hdrlen - max_linkhdr) {
			m_copydata(mb, moff, (int)len,
			    mtod(m, caddr_t) + hdrlen);
			m->m_len += len;
		} else {
			m->m_next = m_copy(mb, moff, (int)len);
			if (m->m_next == NULL) {
				SOCKBUF_UNLOCK(&so->so_snd);
				(void) m_free(m);
				error = ENOBUFS;
				sack_rxmit = 0;
				goto out;
			}
		}

		/*
		 * If we're sending everything we've got, set PUSH.
		 * (This will keep happy those implementations which only
		 * give data to the user when a buffer fills or
		 * a PUSH comes in.)
		 */
		if (off + len == so->so_snd.sb_cc)
			flags |= TH_PUSH;
		SOCKBUF_UNLOCK(&so->so_snd);
	} else {
		SOCKBUF_UNLOCK(&so->so_snd);
		if (tp->t_flags & TF_ACKNOW)
			TCPSTAT_INC(tcps_sndacks);
		else if (flags & (TH_SYN|TH_FIN|TH_RST))
			TCPSTAT_INC(tcps_sndctrl);
		else if (SEQ_GT(tp->snd_up, tp->snd_una))
			TCPSTAT_INC(tcps_sndurg);
		else
			TCPSTAT_INC(tcps_sndwinup);

		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOBUFS;
			sack_rxmit = 0;
			goto out;
		}
#ifdef INET6
		if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
		    MHLEN >= hdrlen) {
			MH_ALIGN(m, hdrlen);
		} else
#endif
		m->m_data += max_linkhdr;
		m->m_len = hdrlen;
	}
	SOCKBUF_UNLOCK_ASSERT(&so->so_snd);
	m->m_pkthdr.rcvif = (struct ifnet *)0;
#ifdef MAC
	mac_inpcb_create_mbuf(tp->t_inpcb, m);
#endif
#ifdef INET6
	if (isipv6) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(tp->t_inpcb, ip6, th);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(tp->t_inpcb, ip, th);
	}

	/*
	 * Fill in fields, remembering maximum advertised
	 * window for use in delaying messages about window sizes.
	 * If resending a FIN, be sure not to use a new sequence number.
	 */
	if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
	    tp->snd_nxt == tp->snd_max)
		tp->snd_nxt--;
	/*
	 * If we are starting a connection, send ECN setup
	 * SYN packet.  If we are on a retransmit, we may
	 * resend those bits a number of times as per
	 * RFC 3168.
	 */
	if (tp->t_state == TCPS_SYN_SENT && V_tcp_do_ecn) {
		if (tp->t_rxtshift >= 1) {
			if (tp->t_rxtshift <= V_tcp_ecn_maxretries)
				flags |= TH_ECE|TH_CWR;
		} else
			flags |= TH_ECE|TH_CWR;
	}

	if (tp->t_state == TCPS_ESTABLISHED &&
	    (tp->t_flags & TF_ECN_PERMIT)) {
		/*
		 * If the peer has ECN, mark data packets with
		 * ECN capable transmission (ECT).
		 * Ignore pure ack packets, retransmissions and window probes.
		 */
		if (len > 0 && SEQ_GEQ(tp->snd_nxt, tp->snd_max) &&
		    !((tp->t_flags & TF_FORCEDATA) && len == 1)) {
#ifdef INET6
			if (isipv6)
				ip6->ip6_flow |= htonl(IPTOS_ECN_ECT0 << 20);
			else
#endif
				ip->ip_tos |= IPTOS_ECN_ECT0;
			TCPSTAT_INC(tcps_ecn_ect0);
		}

		/*
		 * Reply with proper ECN notifications.
		 */
		if (tp->t_flags & TF_ECN_SND_CWR) {
			flags |= TH_CWR;
			tp->t_flags &= ~TF_ECN_SND_CWR;
		}
		if (tp->t_flags & TF_ECN_SND_ECE)
			flags |= TH_ECE;
	}

	/*
	 * If we are doing retransmissions, then snd_nxt will
	 * not reflect the first unsent octet.  For ACK only
	 * packets, we do not want the sequence number of the
	 * retransmitted packet, we want the sequence number
	 * of the next unsent octet.  So, if there is no data
	 * (and no SYN or FIN), use snd_max instead of snd_nxt
	 * when filling in ti_seq.  But if we are in persist
	 * state, snd_max might reflect one byte beyond the
	 * right edge of the window, so use snd_nxt in that
	 * case, since we know we aren't doing a retransmission.
	 * (retransmit and persist are mutually exclusive...)
	 */
	if (sack_rxmit == 0) {
		if (len || (flags & (TH_SYN|TH_FIN)) ||
		    tcp_timer_active(tp, TT_PERSIST))
			th->th_seq = htonl(tp->snd_nxt);
		else
			th->th_seq = htonl(tp->snd_max);
	} else {
		th->th_seq = htonl(p->rxmit);
		p->rxmit += len;
		tp->sackhint.sack_bytes_rexmit += len;
	}
	th->th_ack = htonl(tp->rcv_nxt);
	if (optlen) {
		bcopy(opt, th + 1, optlen);
		th->th_off = (sizeof (struct tcphdr) + optlen) >> 2;
	}
	th->th_flags = flags;
	/*
	 * Calculate receive window.  Don't shrink window,
	 * but avoid silly window syndrome.
	 */
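	/*
	 * Illustrative consequence: with so_rcv.sb_hiwat = 65536 and
	 * t_maxseg = 1460, any window smaller than one full segment (which
	 * is also below sb_hiwat / 4) is advertised as zero, so the peer
	 * is never enticed into sending tiny segments.
	 */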
	if (recwin < (long)(so->so_rcv.sb_hiwat / 4) &&
	    recwin < (long)tp->t_maxseg)
		recwin = 0;
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
	    recwin < (long)(tp->rcv_adv - tp->rcv_nxt))
		recwin = (long)(tp->rcv_adv - tp->rcv_nxt);
	if (recwin > (long)TCP_MAXWIN << tp->rcv_scale)
		recwin = (long)TCP_MAXWIN << tp->rcv_scale;

	/*
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.  The <SYN,ACK>
	 * case is handled in syncache.
	 */
	if (flags & TH_SYN)
		th->th_win = htons((u_short)
		    (min(sbspace(&so->so_rcv), TCP_MAXWIN)));
	else
		th->th_win = htons((u_short)(recwin >> tp->rcv_scale));
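	/*
	 * Example (illustrative): a 262144 byte recwin with rcv_scale = 3
	 * is advertised as 262144 >> 3 = 32768, which fits the 16-bit
	 * window field; the peer multiplies it back up by 1 << 3.
	 */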

	/*
	 * Adjust the RXWIN0SENT flag - indicate that we have advertised
	 * a 0 window.  This may cause the remote transmitter to stall.  This
	 * flag tells soreceive() to disable delayed acknowledgements when
	 * draining the buffer.  This can occur if the receiver is attempting
	 * to read more data than can be buffered prior to transmitting on
	 * the connection.
	 */
	if (th->th_win == 0) {
		tp->t_sndzerowin++;
		tp->t_flags |= TF_RXWIN0SENT;
	} else
		tp->t_flags &= ~TF_RXWIN0SENT;
	if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
		th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
		th->th_flags |= TH_URG;
	} else
		/*
		 * If no urgent pointer to send, then we pull
		 * the urgent pointer to the left edge of the send window
		 * so that it doesn't drift into the send window on sequence
		 * number wraparound.
		 */
		tp->snd_up = tp->snd_una;	/* drag it along */

#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE) {
		int sigoff = to.to_signature - opt;
		tcp_signature_compute(m, 0, len, optlen,
		    (u_char *)(th + 1) + sigoff, IPSEC_DIR_OUTBOUND);
	}
#endif

	/*
	 * Put TCP length in extended header, and then
	 * checksum extended header and data.
	 */
	m->m_pkthdr.len = hdrlen + len;	/* in6_cksum() needs this */
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		/*
		 * ip6_plen does not need to be filled in now; it will be
		 * set in ip6_output().
		 */
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6, sizeof(struct tcphdr) +
		    optlen + len, IPPROTO_TCP, 0);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + IPPROTO_TCP + len + optlen));
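
		/*
		 * Explanatory note: IPPROTO_TCP is folded into the length
		 * word passed to in_pseudo() above.  Because the checksum
		 * is a ones-complement sum, adding the protocol number into
		 * that 16-bit field yields the same result as summing a
		 * separate pseudo-header protocol field.
		 */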

		/* IP version must be set here for ipv4/ipv6 checking later */
		KASSERT(ip->ip_v == IPVERSION,
		    ("%s: IP version incorrect: %d", __func__, ip->ip_v));
	}
#endif

	/*
	 * Enable TSO and specify the size of the segments.
	 * The TCP pseudo header checksum is always provided.
	 * XXX: Fixme: This is currently not the case for IPv6.
	 */
	if (tso) {
		KASSERT(len > tp->t_maxopd - optlen,
		    ("%s: len <= tso_segsz", __func__));
		m->m_pkthdr.csum_flags |= CSUM_TSO;
		m->m_pkthdr.tso_segsz = tp->t_maxopd - optlen;
	}

#ifdef IPSEC
	KASSERT(len + hdrlen + ipoptlen - ipsec_optlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %ld + %u + %u - %u != %u",
	    __func__, len, hdrlen, ipoptlen, ipsec_optlen, m_length(m, NULL)));
#else
	KASSERT(len + hdrlen + ipoptlen == m_length(m, NULL),
	    ("%s: mbuf chain shorter than expected: %ld + %u + %u != %u",
	    __func__, len, hdrlen, ipoptlen, m_length(m, NULL)));
#endif

	/* Run HHOOK_TCP_ESTABLISHED_OUT helper hooks. */
	hhook_run_tcp_est_out(tp, th, &to, len, tso);

#ifdef TCPDEBUG
	/*
	 * Trace.
	 */
	if (so->so_options & SO_DEBUG) {
		u_short save = 0;
#ifdef INET6
		if (!isipv6)
#endif
		{
			save = ipov->ih_len;
			ipov->ih_len = htons(m->m_pkthdr.len /* - hdrlen + (th->th_off << 2) */);
		}
		tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
#ifdef INET6
		if (!isipv6)
#endif
			ipov->ih_len = save;
	}
#endif /* TCPDEBUG */

	/*
	 * Fill in IP length and desired time to live and
	 * send to IP level.  There should be a better way
	 * to handle ttl and tos; we could keep them in
	 * the template, but need a way to checksum without them.
	 */
	/*
	 * m->m_pkthdr.len should have been set before the checksum
	 * calculation above, because in6_cksum() needs it.
	 */
#ifdef INET6
	if (isipv6) {
		struct route_in6 ro;

		bzero(&ro, sizeof(ro));
		/*
		 * we separately set hoplimit for every segment, since the
		 * user might want to change the value via setsockopt.
		 * Also, desired default hop limit might be changed via
		 * Neighbor Discovery.
		 */
		ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, NULL);

		/*
		 * Set the packet size here for the benefit of DTrace probes.
		 * ip6_output() will set it properly; it's supposed to include
		 * the option header lengths as well.
		 */
		ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(*ip6));

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip6, tp, th);

		TCP_PROBE5(send, NULL, tp, ip6, tp, th);

		/* TODO: IPv6 IP6TOS_ECT bit on */
		error = ip6_output(m, tp->t_inpcb->in6p_outputopts, &ro,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, NULL, tp->t_inpcb);

		if (error == EMSGSIZE && ro.ro_rt != NULL)
			mtu = ro.ro_rt->rt_mtu;
		RO_RTFREE(&ro);
	}
#endif /* INET6 */
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		struct route ro;

		bzero(&ro, sizeof(ro));
		ip->ip_len = htons(m->m_pkthdr.len);
#ifdef INET6
		if (tp->t_inpcb->inp_vflag & INP_IPV6PROTO)
			ip->ip_ttl = in6_selecthlim(tp->t_inpcb, NULL);
#endif /* INET6 */
		/*
		 * If we do path MTU discovery, then we set DF on every packet.
		 * This might not be the best thing to do according to RFC3390
		 * Section 2.  However the tcp hostcache mitigates the problem
		 * so it affects only the first tcp connection with a host.
		 *
		 * NB: Don't set DF on small MTU/MSS to have a safe fallback.
		 */
		if (V_path_mtu_discovery && tp->t_maxopd > V_tcp_minmss)
			ip->ip_off |= htons(IP_DF);

		if (tp->t_state == TCPS_SYN_SENT)
			TCP_PROBE5(connect__request, NULL, tp, ip, tp, th);

		TCP_PROBE5(send, NULL, tp, ip, tp, th);

		error = ip_output(m, tp->t_inpcb->inp_options, &ro,
		    ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0), 0,
		    tp->t_inpcb);

		if (error == EMSGSIZE && ro.ro_rt != NULL)
			mtu = ro.ro_rt->rt_mtu;
		RO_RTFREE(&ro);
	}
#endif /* INET */

out:
	/*
	 * In transmit state, time the transmission and arrange for
	 * the retransmit.  In persist state, just set snd_max.
	 */
	if ((tp->t_flags & TF_FORCEDATA) == 0 ||
	    !tcp_timer_active(tp, TT_PERSIST)) {
		tcp_seq startseq = tp->snd_nxt;

		/*
		 * Advance snd_nxt over sequence space of this segment.
		 */
		if (flags & (TH_SYN|TH_FIN)) {
			if (flags & TH_SYN)
				tp->snd_nxt++;
			if (flags & TH_FIN) {
				tp->snd_nxt++;
				tp->t_flags |= TF_SENTFIN;
			}
		}
		if (sack_rxmit)
			goto timer;
		tp->snd_nxt += len;
		if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
			tp->snd_max = tp->snd_nxt;
			/*
			 * Time this transmission if not a retransmission and
			 * not currently timing anything.
			 */
			if (tp->t_rtttime == 0) {
				tp->t_rtttime = ticks;
				tp->t_rtseq = startseq;
				TCPSTAT_INC(tcps_segstimed);
			}
		}

		/*
		 * Set retransmit timer if not currently set,
		 * and not doing a pure ack or a keep-alive probe.
		 * Initial value for retransmit timer is smoothed
		 * round-trip time + 2 * round-trip time variance.
		 * Initialize shift counter which is used for backoff
		 * of retransmit time.
		 */
timer:
		if (!tcp_timer_active(tp, TT_REXMT) &&
		    ((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
		     (tp->snd_nxt != tp->snd_una))) {
			if (tcp_timer_active(tp, TT_PERSIST)) {
				tcp_timer_activate(tp, TT_PERSIST, 0);
				tp->t_rxtshift = 0;
			}
			tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
		}
	} else {
		/*
		 * Persist case, update snd_max but since we are in
		 * persist mode (no window) we do not update snd_nxt.
		 */
		int xlen = len;
		if (flags & TH_SYN)
			++xlen;
		if (flags & TH_FIN) {
			++xlen;
			tp->t_flags |= TF_SENTFIN;
		}
		if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
			tp->snd_max = tp->snd_nxt + len;
	}

	if (error) {

		/*
		 * We know that the packet was lost, so back out the
		 * sequence number advance, if any.
		 *
		 * If the error is EPERM the packet got blocked by the
		 * local firewall.  Normally we should terminate the
		 * connection but the blocking may have been spurious
		 * due to a firewall reconfiguration cycle.  So we treat
		 * it like a packet loss and let the retransmit timer and
		 * timeouts do their work over time.
		 * XXX: It is a POLA question whether calling tcp_drop right
		 * away would be the really correct behavior instead.
		 */
		if (((tp->t_flags & TF_FORCEDATA) == 0 ||
		    !tcp_timer_active(tp, TT_PERSIST)) &&
		    ((flags & TH_SYN) == 0) &&
		    (error != EPERM)) {
			if (sack_rxmit) {
				p->rxmit -= len;
				tp->sackhint.sack_bytes_rexmit -= len;
				KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
				    ("sackhint bytes rtx >= 0"));
			} else
				tp->snd_nxt -= len;
		}
		SOCKBUF_UNLOCK_ASSERT(&so->so_snd);	/* Check gotos. */
		switch (error) {
		case EPERM:
			tp->t_softerror = error;
			return (error);
		case ENOBUFS:
			if (!tcp_timer_active(tp, TT_REXMT) &&
			    !tcp_timer_active(tp, TT_PERSIST))
				tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
			tp->snd_cwnd = tp->t_maxseg;
			return (0);
		case EMSGSIZE:
			/*
			 * For some reason the interface we used initially
			 * to send segments changed to another or lowered
			 * its MTU.
			 * If TSO was active we either got an interface
			 * without TSO capabilities or TSO was turned off.
			 * If we obtained mtu from ip_output() then update
			 * it and try again.
			 */
			if (tso)
				tp->t_flags &= ~TF_TSO;
			if (mtu != 0) {
				tcp_mss_update(tp, -1, mtu, NULL, NULL);
				goto again;
			}
			return (error);
		case EHOSTDOWN:
		case EHOSTUNREACH:
		case ENETDOWN:
		case ENETUNREACH:
			if (TCPS_HAVERCVDSYN(tp->t_state)) {
				tp->t_softerror = error;
				return (0);
			}
			/* FALLTHROUGH */
		default:
			return (error);
		}
	}
	TCPSTAT_INC(tcps_sndtotal);

	/*
	 * Data sent (as far as we can tell).
	 * If this advertises a larger window than any other segment,
	 * then remember the size of the advertised window.
	 * Any pending ACK has now been sent.
	 */
	if (recwin >= 0 && SEQ_GT(tp->rcv_nxt + recwin, tp->rcv_adv))
		tp->rcv_adv = tp->rcv_nxt + recwin;
	tp->last_ack_sent = tp->rcv_nxt;
	tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
	if (tcp_timer_active(tp, TT_DELACK))
		tcp_timer_activate(tp, TT_DELACK, 0);
#if 0
	/*
	 * This completely breaks TCP if newreno is turned on.  What happens
	 * is that if delayed-acks are turned on on the receiver, this code
	 * on the transmitter effectively destroys the TCP window, forcing
	 * it to four packets (1.5Kx4 = 6K window).
	 */
	if (sendalot && --maxburst)
		goto again;
#endif
	if (sendalot)
		goto again;
	return (0);
}

void
tcp_setpersist(struct tcpcb *tp)
{
	int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
	int tt;

	tp->t_flags &= ~TF_PREVVALID;
	if (tcp_timer_active(tp, TT_REXMT))
		panic("tcp_setpersist: retransmit pending");
	/*
	 * Start/restart persistence timer.
	 */
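	/*
	 * Informative note: the base interval t above is derived from the
	 * smoothed RTT estimate and its variance; it is then scaled by
	 * tcp_backoff[] and clamped to [TCPTV_PERSMIN, TCPTV_PERSMAX]
	 * (roughly 5 to 60 seconds in stock kernels; see tcp_timer.h).
	 */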
	TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift],
	    TCPTV_PERSMIN, TCPTV_PERSMAX);
	tcp_timer_activate(tp, TT_PERSIST, tt);
	if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
		tp->t_rxtshift++;
}

/*
 * Insert TCP options according to the supplied parameters to the place
 * optp in a consistent way.  Can handle unaligned destinations.
 *
 * The order of the option processing is crucial for optimal packing and
 * alignment for the scarce option space.
 *
 * The optimal order for a SYN/SYN-ACK segment is:
 *   MSS (4) + NOP (1) + Window scale (3) + SACK permitted (2) +
 *   Timestamp (10) + Signature (18) = 38 bytes out of a maximum of 40.
 *
 * The SACK options should be last.  SACK blocks consume 8*n+2 bytes.
 * So a full size SACK blocks option is 34 bytes (with 4 SACK blocks).
 * At minimum we need 10 bytes (to generate 1 SACK block).  If both
 * TCP Timestamps (12 bytes) and TCP Signatures (18 bytes) are present,
 * we only have 10 bytes for SACK options (40 - (12 + 18)).
 */
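/*
 * Illustrative layout (derived from the alignment rules below) for a SYN
 * carrying MSS, window scale, SACK permitted and timestamps; offsets are
 * in bytes from the start of the option area:
 *
 *	 0- 3	MSS (kind 2, len 4)
 *	 4	NOP
 *	 5- 7	Window scale (kind 3, len 3)
 *	 8- 9	SACK permitted (kind 4, len 2)
 *	10-19	Timestamp (kind 8, len 10)
 */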
int
tcp_addoptions(struct tcpopt *to, u_char *optp)
{
	u_int mask, optlen = 0;

	for (mask = 1; mask < TOF_MAXOPT; mask <<= 1) {
		if ((to->to_flags & mask) != mask)
			continue;
		if (optlen == TCP_MAXOLEN)
			break;
		switch (to->to_flags & mask) {
		case TOF_MSS:
			while (optlen % 4) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_MAXSEG)
				continue;
			optlen += TCPOLEN_MAXSEG;
			*optp++ = TCPOPT_MAXSEG;
			*optp++ = TCPOLEN_MAXSEG;
			to->to_mss = htons(to->to_mss);
			bcopy((u_char *)&to->to_mss, optp, sizeof(to->to_mss));
			optp += sizeof(to->to_mss);
			break;
		case TOF_SCALE:
			while (!optlen || optlen % 2 != 1) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_WINDOW)
				continue;
			optlen += TCPOLEN_WINDOW;
			*optp++ = TCPOPT_WINDOW;
			*optp++ = TCPOLEN_WINDOW;
			*optp++ = to->to_wscale;
			break;
		case TOF_SACKPERM:
			while (optlen % 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACK_PERMITTED)
				continue;
			optlen += TCPOLEN_SACK_PERMITTED;
			*optp++ = TCPOPT_SACK_PERMITTED;
			*optp++ = TCPOLEN_SACK_PERMITTED;
			break;
		case TOF_TS:
			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_TIMESTAMP)
				continue;
			optlen += TCPOLEN_TIMESTAMP;
			*optp++ = TCPOPT_TIMESTAMP;
			*optp++ = TCPOLEN_TIMESTAMP;
			to->to_tsval = htonl(to->to_tsval);
			to->to_tsecr = htonl(to->to_tsecr);
			bcopy((u_char *)&to->to_tsval, optp, sizeof(to->to_tsval));
			optp += sizeof(to->to_tsval);
			bcopy((u_char *)&to->to_tsecr, optp, sizeof(to->to_tsecr));
			optp += sizeof(to->to_tsecr);
			break;
		case TOF_SIGNATURE:
			{
			int siglen = TCPOLEN_SIGNATURE - 2;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SIGNATURE)
				continue;
			optlen += TCPOLEN_SIGNATURE;
			*optp++ = TCPOPT_SIGNATURE;
			*optp++ = TCPOLEN_SIGNATURE;
			to->to_signature = optp;
			while (siglen--)
				*optp++ = 0;
			break;
			}
		case TOF_SACK:
			{
			int sackblks = 0;
			struct sackblk *sack = (struct sackblk *)to->to_sacks;
			tcp_seq sack_seq;

			while (!optlen || optlen % 4 != 2) {
				optlen += TCPOLEN_NOP;
				*optp++ = TCPOPT_NOP;
			}
			if (TCP_MAXOLEN - optlen < TCPOLEN_SACKHDR + TCPOLEN_SACK)
				continue;
			optlen += TCPOLEN_SACKHDR;
			*optp++ = TCPOPT_SACK;
			sackblks = min(to->to_nsacks,
			    (TCP_MAXOLEN - optlen) / TCPOLEN_SACK);
			*optp++ = TCPOLEN_SACKHDR + sackblks * TCPOLEN_SACK;
			while (sackblks--) {
				sack_seq = htonl(sack->start);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				sack_seq = htonl(sack->end);
				bcopy((u_char *)&sack_seq, optp, sizeof(sack_seq));
				optp += sizeof(sack_seq);
				optlen += TCPOLEN_SACK;
				sack++;
			}
			TCPSTAT_INC(tcps_sack_send_blocks);
			break;
			}
		default:
			panic("%s: unknown TCP option type", __func__);
			break;
		}
	}

	/* Terminate and pad TCP options to a 4 byte boundary. */
	if (optlen % 4) {
		optlen += TCPOLEN_EOL;
		*optp++ = TCPOPT_EOL;
	}
	/*
	 * According to RFC 793 (STD0007):
	 *   "The content of the header beyond the End-of-Option option
	 *    must be header padding (i.e., zero)."
	 * and later: "The padding is composed of zeros."
	 */
	while (optlen % 4) {
		optlen += TCPOLEN_PAD;
		*optp++ = TCPOPT_PAD;
	}

	KASSERT(optlen <= TCP_MAXOLEN, ("%s: TCP options too long", __func__));
	return (optlen);
}