1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1988, 1993
5 *	The Regents of the University of California.
6 * Copyright (c) 2006-2007 Robert N. M. Watson
7 * Copyright (c) 2010-2011 Juniper Networks, Inc.
8 * All rights reserved.
9 *
10 * Portions of this software were developed by Robert N. M. Watson under
11 * contract to Juniper Networks, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	From: @(#)tcp_usrreq.c	8.2 (Berkeley) 1/3/94
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD$");
42
43#include "opt_ddb.h"
44#include "opt_inet.h"
45#include "opt_inet6.h"
46#include "opt_ipsec.h"
47#include "opt_kern_tls.h"
48#include "opt_tcpdebug.h"
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/arb.h>
53#include <sys/limits.h>
54#include <sys/malloc.h>
55#include <sys/refcount.h>
56#include <sys/kernel.h>
57#include <sys/ktls.h>
58#include <sys/qmath.h>
59#include <sys/sysctl.h>
60#include <sys/mbuf.h>
61#ifdef INET6
62#include <sys/domain.h>
63#endif /* INET6 */
64#include <sys/socket.h>
65#include <sys/socketvar.h>
66#include <sys/protosw.h>
67#include <sys/proc.h>
68#include <sys/jail.h>
69#include <sys/syslog.h>
70#include <sys/stats.h>
71
72#ifdef DDB
73#include <ddb/ddb.h>
74#endif
75
76#include <net/if.h>
77#include <net/if_var.h>
78#include <net/route.h>
79#include <net/vnet.h>
80
81#include <netinet/in.h>
82#include <netinet/in_kdtrace.h>
83#include <netinet/in_pcb.h>
84#include <netinet/in_systm.h>
85#include <netinet/in_var.h>
86#include <netinet/ip_var.h>
87#ifdef INET6
88#include <netinet/ip6.h>
89#include <netinet6/in6_pcb.h>
90#include <netinet6/ip6_var.h>
91#include <netinet6/scope6_var.h>
92#endif
93#include <netinet/tcp.h>
94#include <netinet/tcp_fsm.h>
95#include <netinet/tcp_seq.h>
96#include <netinet/tcp_timer.h>
97#include <netinet/tcp_var.h>
98#include <netinet/tcp_log_buf.h>
99#include <netinet/tcpip.h>
100#include <netinet/cc/cc.h>
101#include <netinet/tcp_fastopen.h>
102#include <netinet/tcp_hpts.h>
103#ifdef TCPPCAP
104#include <netinet/tcp_pcap.h>
105#endif
106#ifdef TCPDEBUG
107#include <netinet/tcp_debug.h>
108#endif
109#ifdef TCP_OFFLOAD
110#include <netinet/tcp_offload.h>
111#endif
112#include <netipsec/ipsec_support.h>
113
114#include <vm/vm.h>
115#include <vm/vm_param.h>
116#include <vm/pmap.h>
117#include <vm/vm_extern.h>
118#include <vm/vm_map.h>
119#include <vm/vm_page.h>
120
121/*
122 * TCP protocol interface to socket abstraction.
123 */
124#ifdef INET
125static int	tcp_connect(struct tcpcb *, struct sockaddr *,
126		    struct thread *td);
127#endif /* INET */
128#ifdef INET6
129static int	tcp6_connect(struct tcpcb *, struct sockaddr *,
130		    struct thread *td);
131#endif /* INET6 */
132static void	tcp_disconnect(struct tcpcb *);
133static void	tcp_usrclosed(struct tcpcb *);
134static void	tcp_fill_info(struct tcpcb *, struct tcp_info *);
135
136static int	tcp_pru_options_support(struct tcpcb *tp, int flags);
137
138#ifdef TCPDEBUG
139#define	TCPDEBUG0	int ostate = 0
140#define	TCPDEBUG1()	ostate = tp ? tp->t_state : 0
141#define	TCPDEBUG2(req)	if (tp && (so->so_options & SO_DEBUG)) \
142				tcp_trace(TA_USER, ostate, tp, 0, 0, req)
143#else
144#define	TCPDEBUG0
145#define	TCPDEBUG1()
146#define	TCPDEBUG2(req)
147#endif
148
149/*
150 * tcp_require_unique_port requires a globally-unique source port for each
151 * outgoing connection.  The default is to require the 4-tuple to be unique.
152 */
153VNET_DEFINE(int, tcp_require_unique_port) = 0;
154SYSCTL_INT(_net_inet_tcp, OID_AUTO, require_unique_port,
155    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_require_unique_port), 0,
156    "Require globally-unique ephemeral port for outgoing connections");
157#define	V_tcp_require_unique_port	VNET(tcp_require_unique_port)
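/*
 * Illustrative only: the knob above is a read/write, per-VNET sysctl, so an
 * administrator would normally flip it from userland along the lines of
 * (assuming the stock sysctl(8) utility):
 *
 *	sysctl net.inet.tcp.require_unique_port=1
 */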
158
159/*
160 * TCP attaches to the socket via pru_attach(), reserving socket buffer
161 * space and allocating an internet control block.
162 */
163static int
164tcp_usr_attach(struct socket *so, int proto, struct thread *td)
165{
166	struct inpcb *inp;
167	struct tcpcb *tp = NULL;
168	int error;
169	TCPDEBUG0;
170
171	inp = sotoinpcb(so);
172	KASSERT(inp == NULL, ("tcp_usr_attach: inp != NULL"));
173	TCPDEBUG1();
174
175	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
176		error = soreserve(so, V_tcp_sendspace, V_tcp_recvspace);
177		if (error)
178			goto out;
179	}
180
181	so->so_rcv.sb_flags |= SB_AUTOSIZE;
182	so->so_snd.sb_flags |= SB_AUTOSIZE;
183	error = in_pcballoc(so, &V_tcbinfo);
184	if (error)
185		goto out;
186	inp = sotoinpcb(so);
187#ifdef INET6
188	if (inp->inp_vflag & INP_IPV6PROTO) {
189		inp->inp_vflag |= INP_IPV6;
190		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
191			inp->inp_vflag |= INP_IPV4;
192		inp->in6p_hops = -1;	/* use kernel default */
193	}
194	else
195#endif
196		inp->inp_vflag |= INP_IPV4;
197	tp = tcp_newtcpcb(inp);
198	if (tp == NULL) {
199		error = ENOBUFS;
200		in_pcbdetach(inp);
201		in_pcbfree(inp);
202		goto out;
203	}
204	tp->t_state = TCPS_CLOSED;
205	INP_WUNLOCK(inp);
206	TCPSTATES_INC(TCPS_CLOSED);
207out:
208	TCPDEBUG2(PRU_ATTACH);
209	TCP_PROBE2(debug__user, tp, PRU_ATTACH);
210	return (error);
211}
212
213/*
214 * tcp_usr_detach is called when the socket layer loses its final reference
215 * to the socket, be it a file descriptor reference, a reference from TCP,
216 * etc.  At this point, there is only one case in which we will keep around
217 * inpcb state: time wait.
218 */
219static void
220tcp_usr_detach(struct socket *so)
221{
222	struct inpcb *inp;
223	struct tcpcb *tp;
224
225	inp = sotoinpcb(so);
226	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
227	INP_WLOCK(inp);
228	KASSERT(so->so_pcb == inp && inp->inp_socket == so,
229		("%s: socket %p inp %p mismatch", __func__, so, inp));
230
231	tp = intotcpcb(inp);
232
233	if (inp->inp_flags & INP_TIMEWAIT) {
234		/*
235		 * There are two cases to handle: one in which the time wait
236		 * state is being discarded (INP_DROPPED), and one in which
237		 * this connection will remain in timewait.  In the former,
238		 * it is time to discard all state (except tcptw, which has
239		 * already been discarded by the timewait close code, which
240		 * should be further up the call stack somewhere).  In the
241		 * latter case, we detach from the socket, but leave the pcb
242		 * present until timewait ends.
243		 *
244		 * XXXRW: Would it be cleaner to free the tcptw here?
245		 *
246		 * Astute question indeed, from tcptw perspective there are
247		 * four cases to consider:
248		 *
249		 * #1 tcp_usr_detach is called at tcptw creation time by
250		 *  tcp_twstart, then do not discard the newly created tcptw
251		 *  and leave inpcb present until timewait ends
252		 * #2 tcp_usr_detach is called at tcptw creation time by
253		 *  tcp_twstart, but connection is local and tw will be
254		 *  discarded immediately
255		 * #3 tcp_usr_detach is called at timewait end (or reuse) by
256		 *  tcp_twclose, then the tcptw has already been discarded
257		 *  (or reused) and inpcb is freed here
258		 * #4 tcp_usr_detach is called after timewait ends (or reuse)
259		 *  (e.g. by soclose), then tcptw has already been discarded
260		 *  (or reused) and inpcb is freed here
261		 *
262		 *  In all four cases the tcptw should not be freed here.
263		 */
264		if (inp->inp_flags & INP_DROPPED) {
265			in_pcbdetach(inp);
266			if (__predict_true(tp == NULL)) {
267				in_pcbfree(inp);
268			} else {
269				/*
270				 * This case should not happen as in TIMEWAIT
271				 * state the inp should not be destroyed before
272				 * its tcptw.  If INVARIANTS is defined, panic.
273				 */
274#ifdef INVARIANTS
275				panic("%s: Panic before an inp double-free: "
276				    "INP_TIMEWAIT && INP_DROPPED && tp != NULL"
277				    , __func__);
278#else
279				log(LOG_ERR, "%s: Avoid an inp double-free: "
280				    "INP_TIMEWAIT && INP_DROPPED && tp != NULL"
281				    , __func__);
282#endif
283				INP_WUNLOCK(inp);
284			}
285		} else {
286			in_pcbdetach(inp);
287			INP_WUNLOCK(inp);
288		}
289	} else {
290		/*
291		 * If the connection is not in timewait, we consider two
292		 * conditions: one in which no further processing is
293		 * necessary (dropped || embryonic), and one in which TCP is
294		 * not yet done, but no longer requires the socket, so the
295		 * pcb will persist for the time being.
296		 *
297		 * XXXRW: Does the second case still occur?
298		 */
299		if (inp->inp_flags & INP_DROPPED ||
300		    tp->t_state < TCPS_SYN_SENT) {
301			tcp_discardcb(tp);
302			in_pcbdetach(inp);
303			in_pcbfree(inp);
304		} else {
305			in_pcbdetach(inp);
306			INP_WUNLOCK(inp);
307		}
308	}
309}
310
311#ifdef INET
312/*
313 * Give the socket an address.
314 */
315static int
316tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
317{
318	int error = 0;
319	struct inpcb *inp;
320	struct tcpcb *tp = NULL;
321	struct sockaddr_in *sinp;
322
323	sinp = (struct sockaddr_in *)nam;
324	if (nam->sa_family != AF_INET) {
325		/*
326		 * Preserve compatibility with old programs.
327		 */
328		if (nam->sa_family != AF_UNSPEC ||
329		    sinp->sin_addr.s_addr != INADDR_ANY)
330			return (EAFNOSUPPORT);
331		nam->sa_family = AF_INET;
332	}
333	if (nam->sa_len != sizeof(*sinp))
334		return (EINVAL);
335
336	/*
337	 * Must check for multicast addresses and disallow binding
338	 * to them.
339	 */
340	if (IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
341		return (EAFNOSUPPORT);
342
343	TCPDEBUG0;
344	inp = sotoinpcb(so);
345	KASSERT(inp != NULL, ("tcp_usr_bind: inp == NULL"));
346	INP_WLOCK(inp);
347	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
348		error = EINVAL;
349		goto out;
350	}
351	tp = intotcpcb(inp);
352	TCPDEBUG1();
353	INP_HASH_WLOCK(&V_tcbinfo);
354	error = in_pcbbind(inp, nam, td->td_ucred);
355	INP_HASH_WUNLOCK(&V_tcbinfo);
356out:
357	TCPDEBUG2(PRU_BIND);
358	TCP_PROBE2(debug__user, tp, PRU_BIND);
359	INP_WUNLOCK(inp);
360
361	return (error);
362}
363#endif /* INET */
364
365#ifdef INET6
366static int
367tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
368{
369	int error = 0;
370	struct inpcb *inp;
371	struct tcpcb *tp = NULL;
372	struct sockaddr_in6 *sin6;
373	u_char vflagsav;
374
375	sin6 = (struct sockaddr_in6 *)nam;
376	if (nam->sa_family != AF_INET6)
377		return (EAFNOSUPPORT);
378	if (nam->sa_len != sizeof(*sin6))
379		return (EINVAL);
380
381	/*
382	 * Must check for multicast addresses and disallow binding
383	 * to them.
384	 */
385	if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
386		return (EAFNOSUPPORT);
387
388	TCPDEBUG0;
389	inp = sotoinpcb(so);
390	KASSERT(inp != NULL, ("tcp6_usr_bind: inp == NULL"));
391	INP_WLOCK(inp);
392	vflagsav = inp->inp_vflag;
393	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
394		error = EINVAL;
395		goto out;
396	}
397	tp = intotcpcb(inp);
398	TCPDEBUG1();
399	INP_HASH_WLOCK(&V_tcbinfo);
400	inp->inp_vflag &= ~INP_IPV4;
401	inp->inp_vflag |= INP_IPV6;
402#ifdef INET
403	if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
404		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
405			inp->inp_vflag |= INP_IPV4;
406		else if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
407			struct sockaddr_in sin;
408
409			in6_sin6_2_sin(&sin, sin6);
410			if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
411				error = EAFNOSUPPORT;
412				INP_HASH_WUNLOCK(&V_tcbinfo);
413				goto out;
414			}
415			inp->inp_vflag |= INP_IPV4;
416			inp->inp_vflag &= ~INP_IPV6;
417			error = in_pcbbind(inp, (struct sockaddr *)&sin,
418			    td->td_ucred);
419			INP_HASH_WUNLOCK(&V_tcbinfo);
420			goto out;
421		}
422	}
423#endif
424	error = in6_pcbbind(inp, nam, td->td_ucred);
425	INP_HASH_WUNLOCK(&V_tcbinfo);
426out:
427	if (error != 0)
428		inp->inp_vflag = vflagsav;
429	TCPDEBUG2(PRU_BIND);
430	TCP_PROBE2(debug__user, tp, PRU_BIND);
431	INP_WUNLOCK(inp);
432	return (error);
433}
434#endif /* INET6 */
435
436#ifdef INET
437/*
438 * Prepare to accept connections.
439 */
440static int
441tcp_usr_listen(struct socket *so, int backlog, struct thread *td)
442{
443	int error = 0;
444	struct inpcb *inp;
445	struct tcpcb *tp = NULL;
446
447	TCPDEBUG0;
448	inp = sotoinpcb(so);
449	KASSERT(inp != NULL, ("tcp_usr_listen: inp == NULL"));
450	INP_WLOCK(inp);
451	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
452		error = EINVAL;
453		goto out;
454	}
455	tp = intotcpcb(inp);
456	TCPDEBUG1();
457	SOCK_LOCK(so);
458	error = solisten_proto_check(so);
459	INP_HASH_WLOCK(&V_tcbinfo);
460	if (error == 0 && inp->inp_lport == 0)
461		error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
462	INP_HASH_WUNLOCK(&V_tcbinfo);
463	if (error == 0) {
464		tcp_state_change(tp, TCPS_LISTEN);
465		solisten_proto(so, backlog);
466#ifdef TCP_OFFLOAD
467		if ((so->so_options & SO_NO_OFFLOAD) == 0)
468			tcp_offload_listen_start(tp);
469#endif
470	}
471	SOCK_UNLOCK(so);
472
473	if (IS_FASTOPEN(tp->t_flags))
474		tp->t_tfo_pending = tcp_fastopen_alloc_counter();
475
476out:
477	TCPDEBUG2(PRU_LISTEN);
478	TCP_PROBE2(debug__user, tp, PRU_LISTEN);
479	INP_WUNLOCK(inp);
480	return (error);
481}
482#endif /* INET */
483
484#ifdef INET6
485static int
486tcp6_usr_listen(struct socket *so, int backlog, struct thread *td)
487{
488	int error = 0;
489	struct inpcb *inp;
490	struct tcpcb *tp = NULL;
491	u_char vflagsav;
492
493	TCPDEBUG0;
494	inp = sotoinpcb(so);
495	KASSERT(inp != NULL, ("tcp6_usr_listen: inp == NULL"));
496	INP_WLOCK(inp);
497	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
498		error = EINVAL;
499		goto out;
500	}
501	vflagsav = inp->inp_vflag;
502	tp = intotcpcb(inp);
503	TCPDEBUG1();
504	SOCK_LOCK(so);
505	error = solisten_proto_check(so);
506	INP_HASH_WLOCK(&V_tcbinfo);
507	if (error == 0 && inp->inp_lport == 0) {
508		inp->inp_vflag &= ~INP_IPV4;
509		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0)
510			inp->inp_vflag |= INP_IPV4;
511		error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
512	}
513	INP_HASH_WUNLOCK(&V_tcbinfo);
514	if (error == 0) {
515		tcp_state_change(tp, TCPS_LISTEN);
516		solisten_proto(so, backlog);
517#ifdef TCP_OFFLOAD
518		if ((so->so_options & SO_NO_OFFLOAD) == 0)
519			tcp_offload_listen_start(tp);
520#endif
521	}
522	SOCK_UNLOCK(so);
523
524	if (IS_FASTOPEN(tp->t_flags))
525		tp->t_tfo_pending = tcp_fastopen_alloc_counter();
526
527	if (error != 0)
528		inp->inp_vflag = vflagsav;
529
530out:
531	TCPDEBUG2(PRU_LISTEN);
532	TCP_PROBE2(debug__user, tp, PRU_LISTEN);
533	INP_WUNLOCK(inp);
534	return (error);
535}
536#endif /* INET6 */
537
538#ifdef INET
539/*
540 * Initiate connection to peer.
541 * Create a template for use in transmissions on this connection.
542 * Enter SYN_SENT state, and mark socket as connecting.
543 * Start keep-alive timer, and seed output sequence space.
544 * Send initial segment on connection.
545 */
546static int
547tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
548{
549	struct epoch_tracker et;
550	int error = 0;
551	struct inpcb *inp;
552	struct tcpcb *tp = NULL;
553	struct sockaddr_in *sinp;
554
555	sinp = (struct sockaddr_in *)nam;
556	if (nam->sa_family != AF_INET)
557		return (EAFNOSUPPORT);
558	if (nam->sa_len != sizeof (*sinp))
559		return (EINVAL);
560
561	/*
562	 * Must disallow TCP ``connections'' to multicast addresses.
563	 */
564	if (IN_MULTICAST(ntohl(sinp->sin_addr.s_addr)))
565		return (EAFNOSUPPORT);
566	if (ntohl(sinp->sin_addr.s_addr) == INADDR_BROADCAST)
567		return (EACCES);
568	if ((error = prison_remote_ip4(td->td_ucred, &sinp->sin_addr)) != 0)
569		return (error);
570
571	TCPDEBUG0;
572	inp = sotoinpcb(so);
573	KASSERT(inp != NULL, ("tcp_usr_connect: inp == NULL"));
574	INP_WLOCK(inp);
575	if (inp->inp_flags & INP_TIMEWAIT) {
576		error = EADDRINUSE;
577		goto out;
578	}
579	if (inp->inp_flags & INP_DROPPED) {
580		error = ECONNREFUSED;
581		goto out;
582	}
583	tp = intotcpcb(inp);
584	TCPDEBUG1();
585	NET_EPOCH_ENTER(et);
586	if ((error = tcp_connect(tp, nam, td)) != 0)
587		goto out_in_epoch;
588#ifdef TCP_OFFLOAD
589	if (registered_toedevs > 0 &&
590	    (so->so_options & SO_NO_OFFLOAD) == 0 &&
591	    (error = tcp_offload_connect(so, nam)) == 0)
592		goto out_in_epoch;
593#endif
594	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
595	error = tp->t_fb->tfb_tcp_output(tp);
596out_in_epoch:
597	NET_EPOCH_EXIT(et);
598out:
599	TCPDEBUG2(PRU_CONNECT);
600	TCP_PROBE2(debug__user, tp, PRU_CONNECT);
601	INP_WUNLOCK(inp);
602	return (error);
603}
604#endif /* INET */
605
606#ifdef INET6
607static int
608tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
609{
610	struct epoch_tracker et;
611	int error = 0;
612	struct inpcb *inp;
613	struct tcpcb *tp = NULL;
614	struct sockaddr_in6 *sin6;
615	u_int8_t incflagsav;
616	u_char vflagsav;
617
618	TCPDEBUG0;
619
620	sin6 = (struct sockaddr_in6 *)nam;
621	if (nam->sa_family != AF_INET6)
622		return (EAFNOSUPPORT);
623	if (nam->sa_len != sizeof (*sin6))
624		return (EINVAL);
625
626	/*
627	 * Must disallow TCP ``connections'' to multicast addresses.
628	 */
629	if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
630		return (EAFNOSUPPORT);
631
632	inp = sotoinpcb(so);
633	KASSERT(inp != NULL, ("tcp6_usr_connect: inp == NULL"));
634	INP_WLOCK(inp);
635	vflagsav = inp->inp_vflag;
636	incflagsav = inp->inp_inc.inc_flags;
637	if (inp->inp_flags & INP_TIMEWAIT) {
638		error = EADDRINUSE;
639		goto out;
640	}
641	if (inp->inp_flags & INP_DROPPED) {
642		error = ECONNREFUSED;
643		goto out;
644	}
645	tp = intotcpcb(inp);
646	TCPDEBUG1();
647#ifdef INET
648	/*
649	 * XXXRW: Some confusion: V4/V6 flags relate to binding, and
650	 * therefore probably require the hash lock, which isn't held here.
651	 * Is this a significant problem?
652	 */
653	if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
654		struct sockaddr_in sin;
655
656		if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
657			error = EINVAL;
658			goto out;
659		}
660		if ((inp->inp_vflag & INP_IPV4) == 0) {
661			error = EAFNOSUPPORT;
662			goto out;
663		}
664
665		in6_sin6_2_sin(&sin, sin6);
666		if (IN_MULTICAST(ntohl(sin.sin_addr.s_addr))) {
667			error = EAFNOSUPPORT;
668			goto out;
669		}
670		if (ntohl(sin.sin_addr.s_addr) == INADDR_BROADCAST) {
671			error = EACCES;
672			goto out;
673		}
674		if ((error = prison_remote_ip4(td->td_ucred,
675		    &sin.sin_addr)) != 0)
676			goto out;
677		inp->inp_vflag |= INP_IPV4;
678		inp->inp_vflag &= ~INP_IPV6;
679		NET_EPOCH_ENTER(et);
680		if ((error = tcp_connect(tp, (struct sockaddr *)&sin, td)) != 0)
681			goto out_in_epoch;
682#ifdef TCP_OFFLOAD
683		if (registered_toedevs > 0 &&
684		    (so->so_options & SO_NO_OFFLOAD) == 0 &&
685		    (error = tcp_offload_connect(so, nam)) == 0)
686			goto out_in_epoch;
687#endif
688		error = tp->t_fb->tfb_tcp_output(tp);
689		goto out_in_epoch;
690	} else {
691		if ((inp->inp_vflag & INP_IPV6) == 0) {
692			error = EAFNOSUPPORT;
693			goto out;
694		}
695	}
696#endif
697	if ((error = prison_remote_ip6(td->td_ucred, &sin6->sin6_addr)) != 0)
698		goto out;
699	inp->inp_vflag &= ~INP_IPV4;
700	inp->inp_vflag |= INP_IPV6;
701	inp->inp_inc.inc_flags |= INC_ISIPV6;
702	if ((error = tcp6_connect(tp, nam, td)) != 0)
703		goto out;
704#ifdef TCP_OFFLOAD
705	if (registered_toedevs > 0 &&
706	    (so->so_options & SO_NO_OFFLOAD) == 0 &&
707	    (error = tcp_offload_connect(so, nam)) == 0)
708		goto out;
709#endif
710	tcp_timer_activate(tp, TT_KEEP, TP_KEEPINIT(tp));
711	NET_EPOCH_ENTER(et);
712	error = tp->t_fb->tfb_tcp_output(tp);
713#ifdef INET
714out_in_epoch:
715#endif
716	NET_EPOCH_EXIT(et);
717out:
718	/*
719	 * If the implicit bind in the connect call fails, restore
720	 * the flags we modified.
721	 */
722	if (error != 0 && inp->inp_lport == 0) {
723		inp->inp_vflag = vflagsav;
724		inp->inp_inc.inc_flags = incflagsav;
725	}
726
727	TCPDEBUG2(PRU_CONNECT);
728	TCP_PROBE2(debug__user, tp, PRU_CONNECT);
729	INP_WUNLOCK(inp);
730	return (error);
731}
732#endif /* INET6 */
733
734/*
735 * Initiate disconnect from peer.
736 * If connection never passed embryonic stage, just drop;
737 * else if don't need to let data drain, then can just drop anyways,
738 * else have to begin TCP shutdown process: mark socket disconnecting,
739 * drain unread data, state switch to reflect user close, and
740 * send segment (e.g. FIN) to peer.  Socket will be really disconnected
741 * when peer sends FIN and acks ours.
742 *
743 * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB.
744 */
745static int
746tcp_usr_disconnect(struct socket *so)
747{
748	struct inpcb *inp;
749	struct tcpcb *tp = NULL;
750	struct epoch_tracker et;
751	int error = 0;
752
753	TCPDEBUG0;
754	NET_EPOCH_ENTER(et);
755	inp = sotoinpcb(so);
756	KASSERT(inp != NULL, ("tcp_usr_disconnect: inp == NULL"));
757	INP_WLOCK(inp);
758	if (inp->inp_flags & INP_TIMEWAIT)
759		goto out;
760	if (inp->inp_flags & INP_DROPPED) {
761		error = ECONNRESET;
762		goto out;
763	}
764	tp = intotcpcb(inp);
765	TCPDEBUG1();
766	tcp_disconnect(tp);
767out:
768	TCPDEBUG2(PRU_DISCONNECT);
769	TCP_PROBE2(debug__user, tp, PRU_DISCONNECT);
770	INP_WUNLOCK(inp);
771	NET_EPOCH_EXIT(et);
772	return (error);
773}
774
775#ifdef INET
776/*
777 * Accept a connection.  Essentially all the work is done at higher levels;
778 * just return the address of the peer, storing through addr.
779 */
780static int
781tcp_usr_accept(struct socket *so, struct sockaddr **nam)
782{
783	int error = 0;
784	struct inpcb *inp = NULL;
785	struct tcpcb *tp = NULL;
786	struct in_addr addr;
787	in_port_t port = 0;
788	TCPDEBUG0;
789
790	if (so->so_state & SS_ISDISCONNECTED)
791		return (ECONNABORTED);
792
793	inp = sotoinpcb(so);
794	KASSERT(inp != NULL, ("tcp_usr_accept: inp == NULL"));
795	INP_WLOCK(inp);
796	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
797		error = ECONNABORTED;
798		goto out;
799	}
800	tp = intotcpcb(inp);
801	TCPDEBUG1();
802
803	/*
804	 * We inline in_getpeeraddr and COMMON_END here, so that we can
805	 * copy the data of interest and defer the malloc until after we
806	 * release the lock.
807	 */
808	port = inp->inp_fport;
809	addr = inp->inp_faddr;
810
811out:
812	TCPDEBUG2(PRU_ACCEPT);
813	TCP_PROBE2(debug__user, tp, PRU_ACCEPT);
814	INP_WUNLOCK(inp);
815	if (error == 0)
816		*nam = in_sockaddr(port, &addr);
817	return (error);
818}
819#endif /* INET */
820
821#ifdef INET6
822static int
823tcp6_usr_accept(struct socket *so, struct sockaddr **nam)
824{
825	struct inpcb *inp = NULL;
826	int error = 0;
827	struct tcpcb *tp = NULL;
828	struct in_addr addr;
829	struct in6_addr addr6;
830	struct epoch_tracker et;
831	in_port_t port = 0;
832	int v4 = 0;
833	TCPDEBUG0;
834
835	if (so->so_state & SS_ISDISCONNECTED)
836		return (ECONNABORTED);
837
838	inp = sotoinpcb(so);
839	KASSERT(inp != NULL, ("tcp6_usr_accept: inp == NULL"));
840	NET_EPOCH_ENTER(et);
841	INP_WLOCK(inp);
842	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
843		error = ECONNABORTED;
844		goto out;
845	}
846	tp = intotcpcb(inp);
847	TCPDEBUG1();
848
849	/*
850	 * We inline in6_mapped_peeraddr and COMMON_END here, so that we can
851	 * copy the data of interest and defer the malloc until after we
852	 * release the lock.
853	 */
854	if (inp->inp_vflag & INP_IPV4) {
855		v4 = 1;
856		port = inp->inp_fport;
857		addr = inp->inp_faddr;
858	} else {
859		port = inp->inp_fport;
860		addr6 = inp->in6p_faddr;
861	}
862
863out:
864	TCPDEBUG2(PRU_ACCEPT);
865	TCP_PROBE2(debug__user, tp, PRU_ACCEPT);
866	INP_WUNLOCK(inp);
867	NET_EPOCH_EXIT(et);
868	if (error == 0) {
869		if (v4)
870			*nam = in6_v4mapsin6_sockaddr(port, &addr);
871		else
872			*nam = in6_sockaddr(port, &addr6);
873	}
874	return (error);
875}
876#endif /* INET6 */
877
878/*
879 * Mark the connection as being incapable of further output.
880 */
881static int
882tcp_usr_shutdown(struct socket *so)
883{
884	int error = 0;
885	struct inpcb *inp;
886	struct tcpcb *tp = NULL;
887	struct epoch_tracker et;
888
889	TCPDEBUG0;
890	NET_EPOCH_ENTER(et);
891	inp = sotoinpcb(so);
892	KASSERT(inp != NULL, ("inp == NULL"));
893	INP_WLOCK(inp);
894	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
895		error = ECONNRESET;
896		goto out;
897	}
898	tp = intotcpcb(inp);
899	TCPDEBUG1();
900	socantsendmore(so);
901	tcp_usrclosed(tp);
902	if (!(inp->inp_flags & INP_DROPPED))
903		error = tp->t_fb->tfb_tcp_output(tp);
904
905out:
906	TCPDEBUG2(PRU_SHUTDOWN);
907	TCP_PROBE2(debug__user, tp, PRU_SHUTDOWN);
908	INP_WUNLOCK(inp);
909	NET_EPOCH_EXIT(et);
910
911	return (error);
912}
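/*
 * For reference, tcp_usr_shutdown() is what a userland half-close ends up
 * calling; a minimal sketch using only the standard sockets API:
 *
 *	if (shutdown(s, SHUT_WR) == -1)
 *		warn("shutdown");
 *
 * The FIN goes out once any already-queued data has been sent; the peer may
 * keep sending to us until it closes its own side.
 */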
913
914/*
915 * After a receive, possibly send window update to peer.
916 */
917static int
918tcp_usr_rcvd(struct socket *so, int flags)
919{
920	struct epoch_tracker et;
921	struct inpcb *inp;
922	struct tcpcb *tp = NULL;
923	int error = 0;
924
925	TCPDEBUG0;
926	inp = sotoinpcb(so);
927	KASSERT(inp != NULL, ("tcp_usr_rcvd: inp == NULL"));
928	INP_WLOCK(inp);
929	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
930		error = ECONNRESET;
931		goto out;
932	}
933	tp = intotcpcb(inp);
934	TCPDEBUG1();
935	/*
936	 * For passively-created TFO connections, don't attempt a window
937	 * update while still in SYN_RECEIVED as this may trigger an early
938	 * SYN|ACK.  It is preferable to have the SYN|ACK be sent along with
939	 * application response data, or failing that, when the DELACK timer
940	 * expires.
941	 */
942	if (IS_FASTOPEN(tp->t_flags) &&
943	    (tp->t_state == TCPS_SYN_RECEIVED))
944		goto out;
945	NET_EPOCH_ENTER(et);
946#ifdef TCP_OFFLOAD
947	if (tp->t_flags & TF_TOE)
948		tcp_offload_rcvd(tp);
949	else
950#endif
951	tp->t_fb->tfb_tcp_output(tp);
952	NET_EPOCH_EXIT(et);
953out:
954	TCPDEBUG2(PRU_RCVD);
955	TCP_PROBE2(debug__user, tp, PRU_RCVD);
956	INP_WUNLOCK(inp);
957	return (error);
958}
959
960/*
961 * Do a send by putting data in output queue and updating urgent
962 * marker if URG set.  Possibly send more data.  Unlike the other
963 * pru_*() routines, the mbuf chains are our responsibility.  We
964 * must either enqueue them or free them.  The other pru_* routines
965 * generally are caller-frees.
966 */
967static int
968tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
969    struct sockaddr *nam, struct mbuf *control, struct thread *td)
970{
971	struct epoch_tracker et;
972	int error = 0;
973	struct inpcb *inp;
974	struct tcpcb *tp = NULL;
975#ifdef INET
976#ifdef INET6
977	struct sockaddr_in sin;
978#endif
979	struct sockaddr_in *sinp;
980#endif
981#ifdef INET6
982	int isipv6;
983#endif
984	u_int8_t incflagsav;
985	u_char vflagsav;
986	bool restoreflags;
987	TCPDEBUG0;
988
989	/*
990	 * We require the pcbinfo "read lock" if we will close the socket
991	 * as part of this call.
992	 */
993	NET_EPOCH_ENTER(et);
994	inp = sotoinpcb(so);
995	KASSERT(inp != NULL, ("tcp_usr_send: inp == NULL"));
996	INP_WLOCK(inp);
997	vflagsav = inp->inp_vflag;
998	incflagsav = inp->inp_inc.inc_flags;
999	restoreflags = false;
1000	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1001		if (control)
1002			m_freem(control);
1003		error = ECONNRESET;
1004		goto out;
1005	}
1006	if (control != NULL) {
1007		/* TCP doesn't do control messages (rights, creds, etc) */
1008		if (control->m_len) {
1009			m_freem(control);
1010			error = EINVAL;
1011			goto out;
1012		}
1013		m_freem(control);	/* empty control, just free it */
1014		control = NULL;
1015	}
1016	tp = intotcpcb(inp);
1017	if ((flags & PRUS_OOB) != 0 &&
1018	    (error = tcp_pru_options_support(tp, PRUS_OOB)) != 0)
1019		goto out;
1020
1021	TCPDEBUG1();
1022	if (nam != NULL && tp->t_state < TCPS_SYN_SENT) {
1023		switch (nam->sa_family) {
1024#ifdef INET
1025		case AF_INET:
1026			sinp = (struct sockaddr_in *)nam;
1027			if (sinp->sin_len != sizeof(struct sockaddr_in)) {
1028				error = EINVAL;
1029				goto out;
1030			}
1031			if ((inp->inp_vflag & INP_IPV6) != 0) {
1032				error = EAFNOSUPPORT;
1033				goto out;
1034			}
1035			if (IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
1036				error = EAFNOSUPPORT;
1037				goto out;
1038			}
1039			if (ntohl(sinp->sin_addr.s_addr) == INADDR_BROADCAST) {
1040				error = EACCES;
1041				goto out;
1042			}
1043			if ((error = prison_remote_ip4(td->td_ucred,
1044			    &sinp->sin_addr)))
1045				goto out;
1046#ifdef INET6
1047			isipv6 = 0;
1048#endif
1049			break;
1050#endif /* INET */
1051#ifdef INET6
1052		case AF_INET6:
1053		{
1054			struct sockaddr_in6 *sin6;
1055
1056			sin6 = (struct sockaddr_in6 *)nam;
1057			if (sin6->sin6_len != sizeof(*sin6)) {
1058				error = EINVAL;
1059				goto out;
1060			}
1061			if ((inp->inp_vflag & INP_IPV6PROTO) == 0) {
1062				error = EAFNOSUPPORT;
1063				goto out;
1064			}
1065			if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
1066				error = EAFNOSUPPORT;
1067				goto out;
1068			}
1069			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
1070#ifdef INET
1071				if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) {
1072					error = EINVAL;
1073					goto out;
1074				}
1075				if ((inp->inp_vflag & INP_IPV4) == 0) {
1076					error = EAFNOSUPPORT;
1077					goto out;
1078				}
1079				restoreflags = true;
1080				inp->inp_vflag &= ~INP_IPV6;
1081				sinp = &sin;
1082				in6_sin6_2_sin(sinp, sin6);
1083				if (IN_MULTICAST(
1084				    ntohl(sinp->sin_addr.s_addr))) {
1085					error = EAFNOSUPPORT;
1086					goto out;
1087				}
1088				if ((error = prison_remote_ip4(td->td_ucred,
1089				    &sinp->sin_addr)))
1090					goto out;
1091				isipv6 = 0;
1092#else /* !INET */
1093				error = EAFNOSUPPORT;
1094				goto out;
1095#endif /* INET */
1096			} else {
1097				if ((inp->inp_vflag & INP_IPV6) == 0) {
1098					error = EAFNOSUPPORT;
1099					goto out;
1100				}
1101				restoreflags = true;
1102				inp->inp_vflag &= ~INP_IPV4;
1103				inp->inp_inc.inc_flags |= INC_ISIPV6;
1104				if ((error = prison_remote_ip6(td->td_ucred,
1105				    &sin6->sin6_addr)))
1106					goto out;
1107				isipv6 = 1;
1108			}
1109			break;
1110		}
1111#endif /* INET6 */
1112		default:
1113			error = EAFNOSUPPORT;
1114			goto out;
1115		}
1116	}
1117	if (!(flags & PRUS_OOB)) {
1118		sbappendstream(&so->so_snd, m, flags);
1119		m = NULL;
1120		if (nam && tp->t_state < TCPS_SYN_SENT) {
1121			/*
1122			 * Do implied connect if not yet connected,
1123			 * initialize window to default value, and
1124			 * initialize maxseg using peer's cached MSS.
1125			 */
1126#ifdef INET6
1127			if (isipv6)
1128				error = tcp6_connect(tp, nam, td);
1129#endif /* INET6 */
1130#if defined(INET6) && defined(INET)
1131			else
1132#endif
1133#ifdef INET
1134				error = tcp_connect(tp,
1135				    (struct sockaddr *)sinp, td);
1136#endif
1137			/*
1138			 * The bind operation in tcp_connect succeeded. We
1139			 * no longer want to restore the flags if later
1140			 * operations fail.
1141			 */
1142			if (error == 0 || inp->inp_lport != 0)
1143				restoreflags = false;
1144
1145			if (error) {
1146				/* m is freed if PRUS_NOTREADY is unset. */
1147				sbflush(&so->so_snd);
1148				goto out;
1149			}
1150			if (IS_FASTOPEN(tp->t_flags))
1151				tcp_fastopen_connect(tp);
1152			else {
1153				tp->snd_wnd = TTCP_CLIENT_SND_WND;
1154				tcp_mss(tp, -1);
1155			}
1156		}
1157		if (flags & PRUS_EOF) {
1158			/*
1159			 * Close the send side of the connection after
1160			 * the data is sent.
1161			 */
1162			socantsendmore(so);
1163			tcp_usrclosed(tp);
1164		}
1165		if (TCPS_HAVEESTABLISHED(tp->t_state) &&
1166		    ((tp->t_flags2 & TF2_FBYTES_COMPLETE) == 0) &&
1167		    (tp->t_fbyte_out == 0) &&
1168		    (so->so_snd.sb_ccc > 0)) {
1169			tp->t_fbyte_out = ticks;
1170			if (tp->t_fbyte_out == 0)
1171				tp->t_fbyte_out = 1;
1172			if (tp->t_fbyte_out && tp->t_fbyte_in)
1173				tp->t_flags2 |= TF2_FBYTES_COMPLETE;
1174		}
1175		if (!(inp->inp_flags & INP_DROPPED) &&
1176		    !(flags & PRUS_NOTREADY)) {
1177			if (flags & PRUS_MORETOCOME)
1178				tp->t_flags |= TF_MORETOCOME;
1179			error = tp->t_fb->tfb_tcp_output(tp);
1180			if (flags & PRUS_MORETOCOME)
1181				tp->t_flags &= ~TF_MORETOCOME;
1182		}
1183	} else {
1184		/*
1185		 * XXXRW: PRUS_EOF not implemented with PRUS_OOB?
1186		 */
1187		SOCKBUF_LOCK(&so->so_snd);
1188		if (sbspace(&so->so_snd) < -512) {
1189			SOCKBUF_UNLOCK(&so->so_snd);
1190			error = ENOBUFS;
1191			goto out;
1192		}
1193		/*
1194		 * According to RFC961 (Assigned Protocols),
1195		 * the urgent pointer points to the last octet
1196		 * of urgent data.  We continue, however,
1197		 * to consider it to indicate the first octet
1198		 * of data past the urgent section.
1199		 * Otherwise, snd_up should be one lower.
1200		 */
1201		sbappendstream_locked(&so->so_snd, m, flags);
1202		SOCKBUF_UNLOCK(&so->so_snd);
1203		m = NULL;
1204		if (nam && tp->t_state < TCPS_SYN_SENT) {
1205			/*
1206			 * Do implied connect if not yet connected,
1207			 * initialize window to default value, and
1208			 * initialize maxseg using peer's cached MSS.
1209			 */
1210
1211			/*
1212			 * Not going to contemplate SYN|URG
1213			 */
1214			if (IS_FASTOPEN(tp->t_flags))
1215				tp->t_flags &= ~TF_FASTOPEN;
1216#ifdef INET6
1217			if (isipv6)
1218				error = tcp6_connect(tp, nam, td);
1219#endif /* INET6 */
1220#if defined(INET6) && defined(INET)
1221			else
1222#endif
1223#ifdef INET
1224				error = tcp_connect(tp,
1225				    (struct sockaddr *)sinp, td);
1226#endif
1227			/*
1228			 * The bind operation in tcp_connect succeeded. We
1229			 * no longer want to restore the flags if later
1230			 * operations fail.
1231			 */
1232			if (error == 0 || inp->inp_lport != 0)
1233				restoreflags = false;
1234
1235			if (error != 0) {
1236				/* m is freed if PRUS_NOTREADY is unset. */
1237				sbflush(&so->so_snd);
1238				goto out;
1239			}
1240			tp->snd_wnd = TTCP_CLIENT_SND_WND;
1241			tcp_mss(tp, -1);
1242		}
1243		tp->snd_up = tp->snd_una + sbavail(&so->so_snd);
1244		if ((flags & PRUS_NOTREADY) == 0) {
1245			tp->t_flags |= TF_FORCEDATA;
1246			error = tp->t_fb->tfb_tcp_output(tp);
1247			tp->t_flags &= ~TF_FORCEDATA;
1248		}
1249	}
1250	TCP_LOG_EVENT(tp, NULL,
1251	    &inp->inp_socket->so_rcv,
1252	    &inp->inp_socket->so_snd,
1253	    TCP_LOG_USERSEND, error,
1254	    0, NULL, false);
1255
1256out:
1257	/*
1258	 * In case of PRUS_NOTREADY, the caller or tcp_usr_ready() is
1259	 * responsible for freeing memory.
1260	 */
1261	if (m != NULL && (flags & PRUS_NOTREADY) == 0)
1262		m_freem(m);
1263
1264	/*
1265	 * If the request was unsuccessful and we changed flags,
1266	 * restore the original flags.
1267	 */
1268	if (error != 0 && restoreflags) {
1269		inp->inp_vflag = vflagsav;
1270		inp->inp_inc.inc_flags = incflagsav;
1271	}
1272	TCPDEBUG2((flags & PRUS_OOB) ? PRU_SENDOOB :
1273		  ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
1274	TCP_PROBE2(debug__user, tp, (flags & PRUS_OOB) ? PRU_SENDOOB :
1275		   ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
1276	INP_WUNLOCK(inp);
1277	NET_EPOCH_EXIT(et);
1278	return (error);
1279}
1280
1281static int
1282tcp_usr_ready(struct socket *so, struct mbuf *m, int count)
1283{
1284	struct epoch_tracker et;
1285	struct inpcb *inp;
1286	struct tcpcb *tp;
1287	int error;
1288
1289	inp = sotoinpcb(so);
1290	INP_WLOCK(inp);
1291	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1292		INP_WUNLOCK(inp);
1293		mb_free_notready(m, count);
1294		return (ECONNRESET);
1295	}
1296	tp = intotcpcb(inp);
1297
1298	SOCKBUF_LOCK(&so->so_snd);
1299	error = sbready(&so->so_snd, m, count);
1300	SOCKBUF_UNLOCK(&so->so_snd);
1301	if (error == 0) {
1302		NET_EPOCH_ENTER(et);
1303		error = tp->t_fb->tfb_tcp_output(tp);
1304		NET_EPOCH_EXIT(et);
1305	}
1306	INP_WUNLOCK(inp);
1307
1308	return (error);
1309}
1310
1311/*
1312 * Abort the TCP.  Drop the connection abruptly.
1313 */
1314static void
1315tcp_usr_abort(struct socket *so)
1316{
1317	struct inpcb *inp;
1318	struct tcpcb *tp = NULL;
1319	struct epoch_tracker et;
1320	TCPDEBUG0;
1321
1322	inp = sotoinpcb(so);
1323	KASSERT(inp != NULL, ("tcp_usr_abort: inp == NULL"));
1324
1325	NET_EPOCH_ENTER(et);
1326	INP_WLOCK(inp);
1327	KASSERT(inp->inp_socket != NULL,
1328	    ("tcp_usr_abort: inp_socket == NULL"));
1329
1330	/*
1331	 * If we still have full TCP state, and we're not dropped, drop.
1332	 */
1333	if (!(inp->inp_flags & INP_TIMEWAIT) &&
1334	    !(inp->inp_flags & INP_DROPPED)) {
1335		tp = intotcpcb(inp);
1336		TCPDEBUG1();
1337		tp = tcp_drop(tp, ECONNABORTED);
1338		if (tp == NULL)
1339			goto dropped;
1340		TCPDEBUG2(PRU_ABORT);
1341		TCP_PROBE2(debug__user, tp, PRU_ABORT);
1342	}
1343	if (!(inp->inp_flags & INP_DROPPED)) {
1344		SOCK_LOCK(so);
1345		so->so_state |= SS_PROTOREF;
1346		SOCK_UNLOCK(so);
1347		inp->inp_flags |= INP_SOCKREF;
1348	}
1349	INP_WUNLOCK(inp);
1350dropped:
1351	NET_EPOCH_EXIT(et);
1352}
1353
1354/*
1355 * TCP socket is closed.  Start friendly disconnect.
1356 */
1357static void
1358tcp_usr_close(struct socket *so)
1359{
1360	struct inpcb *inp;
1361	struct tcpcb *tp = NULL;
1362	struct epoch_tracker et;
1363	TCPDEBUG0;
1364
1365	inp = sotoinpcb(so);
1366	KASSERT(inp != NULL, ("tcp_usr_close: inp == NULL"));
1367
1368	NET_EPOCH_ENTER(et);
1369	INP_WLOCK(inp);
1370	KASSERT(inp->inp_socket != NULL,
1371	    ("tcp_usr_close: inp_socket == NULL"));
1372
1373	/*
1374	 * If we still have full TCP state, and we're not dropped, initiate
1375	 * a disconnect.
1376	 */
1377	if (!(inp->inp_flags & INP_TIMEWAIT) &&
1378	    !(inp->inp_flags & INP_DROPPED)) {
1379		tp = intotcpcb(inp);
1380		TCPDEBUG1();
1381		tcp_disconnect(tp);
1382		TCPDEBUG2(PRU_CLOSE);
1383		TCP_PROBE2(debug__user, tp, PRU_CLOSE);
1384	}
1385	if (!(inp->inp_flags & INP_DROPPED)) {
1386		SOCK_LOCK(so);
1387		so->so_state |= SS_PROTOREF;
1388		SOCK_UNLOCK(so);
1389		inp->inp_flags |= INP_SOCKREF;
1390	}
1391	INP_WUNLOCK(inp);
1392	NET_EPOCH_EXIT(et);
1393}
1394
1395static int
1396tcp_pru_options_support(struct tcpcb *tp, int flags)
1397{
1398	/*
1399	 * If the specific TCP stack provides a pru_options
1400	 * handler, it may not support all of the PRU_XX
1401	 * options, so we must ask it.  If the handler is
1402	 * not provided, all of the PRU_XX options are
1403	 * supported.
1404	 */
1405	int ret = 0;
1406
1407	if (tp->t_fb->tfb_pru_options) {
1408		ret = (*tp->t_fb->tfb_pru_options)(tp, flags);
1409	}
1410	return (ret);
1411}
1412
1413/*
1414 * Receive out-of-band data.
1415 */
1416static int
1417tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags)
1418{
1419	int error = 0;
1420	struct inpcb *inp;
1421	struct tcpcb *tp = NULL;
1422
1423	TCPDEBUG0;
1424	inp = sotoinpcb(so);
1425	KASSERT(inp != NULL, ("tcp_usr_rcvoob: inp == NULL"));
1426	INP_WLOCK(inp);
1427	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1428		error = ECONNRESET;
1429		goto out;
1430	}
1431	tp = intotcpcb(inp);
1432	error = tcp_pru_options_support(tp, PRUS_OOB);
1433	if (error) {
1434		goto out;
1435	}
1436	TCPDEBUG1();
1437	if ((so->so_oobmark == 0 &&
1438	     (so->so_rcv.sb_state & SBS_RCVATMARK) == 0) ||
1439	    so->so_options & SO_OOBINLINE ||
1440	    tp->t_oobflags & TCPOOB_HADDATA) {
1441		error = EINVAL;
1442		goto out;
1443	}
1444	if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) {
1445		error = EWOULDBLOCK;
1446		goto out;
1447	}
1448	m->m_len = 1;
1449	*mtod(m, caddr_t) = tp->t_iobc;
1450	if ((flags & MSG_PEEK) == 0)
1451		tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA);
1452
1453out:
1454	TCPDEBUG2(PRU_RCVOOB);
1455	TCP_PROBE2(debug__user, tp, PRU_RCVOOB);
1456	INP_WUNLOCK(inp);
1457	return (error);
1458}
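/*
 * For reference, tcp_usr_rcvoob() backs MSG_OOB reads from userland; a
 * minimal sketch using only the standard sockets API:
 *
 *	char c;
 *
 *	if (recv(s, &c, 1, MSG_OOB) == 1)
 *		printf("urgent byte: 0x%02x\n", (unsigned char)c);
 *
 * As implemented above, the read fails with EINVAL when SO_OOBINLINE is set
 * (or there is no mark pending) and with EWOULDBLOCK when the urgent byte
 * has not arrived yet.
 */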
1459
1460#ifdef INET
1461struct pr_usrreqs tcp_usrreqs = {
1462	.pru_abort =		tcp_usr_abort,
1463	.pru_accept =		tcp_usr_accept,
1464	.pru_attach =		tcp_usr_attach,
1465	.pru_bind =		tcp_usr_bind,
1466	.pru_connect =		tcp_usr_connect,
1467	.pru_control =		in_control,
1468	.pru_detach =		tcp_usr_detach,
1469	.pru_disconnect =	tcp_usr_disconnect,
1470	.pru_listen =		tcp_usr_listen,
1471	.pru_peeraddr =		in_getpeeraddr,
1472	.pru_rcvd =		tcp_usr_rcvd,
1473	.pru_rcvoob =		tcp_usr_rcvoob,
1474	.pru_send =		tcp_usr_send,
1475	.pru_ready =		tcp_usr_ready,
1476	.pru_shutdown =		tcp_usr_shutdown,
1477	.pru_sockaddr =		in_getsockaddr,
1478	.pru_sosetlabel =	in_pcbsosetlabel,
1479	.pru_close =		tcp_usr_close,
1480};
1481#endif /* INET */
1482
1483#ifdef INET6
1484struct pr_usrreqs tcp6_usrreqs = {
1485	.pru_abort =		tcp_usr_abort,
1486	.pru_accept =		tcp6_usr_accept,
1487	.pru_attach =		tcp_usr_attach,
1488	.pru_bind =		tcp6_usr_bind,
1489	.pru_connect =		tcp6_usr_connect,
1490	.pru_control =		in6_control,
1491	.pru_detach =		tcp_usr_detach,
1492	.pru_disconnect =	tcp_usr_disconnect,
1493	.pru_listen =		tcp6_usr_listen,
1494	.pru_peeraddr =		in6_mapped_peeraddr,
1495	.pru_rcvd =		tcp_usr_rcvd,
1496	.pru_rcvoob =		tcp_usr_rcvoob,
1497	.pru_send =		tcp_usr_send,
1498	.pru_ready =		tcp_usr_ready,
1499	.pru_shutdown =		tcp_usr_shutdown,
1500	.pru_sockaddr =		in6_mapped_sockaddr,
1501	.pru_sosetlabel =	in_pcbsosetlabel,
1502	.pru_close =		tcp_usr_close,
1503};
1504#endif /* INET6 */
1505
1506#ifdef INET
1507/*
1508 * Common subroutine to open a TCP connection to remote host specified
1509 * by struct sockaddr_in *nam.  Call in_pcbbind to assign a local
1510 * port number if needed.  Call in_pcbconnect_setup to do the routing and
1511 * to choose a local host address (interface).  If there is an existing
1512 * incarnation of the same connection in TIME-WAIT state and if the remote
1513 * host was sending CC options and if the connection duration was < MSL, then
1514 * truncate the previous TIME-WAIT state and proceed.
1515 * Initialize connection parameters and enter SYN-SENT state.
1516 */
1517static int
1518tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
1519{
1520	struct inpcb *inp = tp->t_inpcb, *oinp;
1521	struct socket *so = inp->inp_socket;
1522	struct in_addr laddr;
1523	u_short lport;
1524	int error;
1525
1526	NET_EPOCH_ASSERT();
1527	INP_WLOCK_ASSERT(inp);
1528	INP_HASH_WLOCK(&V_tcbinfo);
1529
1530	if (V_tcp_require_unique_port && inp->inp_lport == 0) {
1531		error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
1532		if (error)
1533			goto out;
1534	}
1535
1536	/*
1537	 * Cannot simply call in_pcbconnect, because there might be an
1538	 * earlier incarnation of this same connection still in
1539	 * TIME_WAIT state, creating an ADDRINUSE error.
1540	 */
1541	laddr = inp->inp_laddr;
1542	lport = inp->inp_lport;
1543	error = in_pcbconnect_setup(inp, nam, &laddr.s_addr, &lport,
1544	    &inp->inp_faddr.s_addr, &inp->inp_fport, &oinp, td->td_ucred);
1545	if (error && oinp == NULL)
1546		goto out;
1547	if (oinp) {
1548		error = EADDRINUSE;
1549		goto out;
1550	}
1551	/* Handle initial bind if it hadn't been done in advance. */
1552	if (inp->inp_lport == 0) {
1553		inp->inp_lport = lport;
1554		if (in_pcbinshash(inp) != 0) {
1555			inp->inp_lport = 0;
1556			error = EAGAIN;
1557			goto out;
1558		}
1559	}
1560	inp->inp_laddr = laddr;
1561	in_pcbrehash(inp);
1562	INP_HASH_WUNLOCK(&V_tcbinfo);
1563
1564	/*
1565	 * Compute window scaling to request:
1566	 * Scale to fit into sweet spot.  See tcp_syncache.c.
1567	 * XXX: This should move to tcp_output().
1568	 */
1569	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1570	    (TCP_MAXWIN << tp->request_r_scale) < sb_max)
1571		tp->request_r_scale++;
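	/*
	 * Worked example (illustrative): with the common 2 MB
	 * kern.ipc.maxsockbuf default, sb_max is 2097152 and the loop above
	 * stops at request_r_scale = 6, since 65535 << 5 is still below
	 * sb_max while 65535 << 6 is not.
	 */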
1572
1573	soisconnecting(so);
1574	TCPSTAT_INC(tcps_connattempt);
1575	tcp_state_change(tp, TCPS_SYN_SENT);
1576	tp->iss = tcp_new_isn(&inp->inp_inc);
1577	if (tp->t_flags & TF_REQ_TSTMP)
1578		tp->ts_offset = tcp_new_ts_offset(&inp->inp_inc);
1579	tcp_sendseqinit(tp);
1580
1581	return (0);
1582
1583out:
1584	INP_HASH_WUNLOCK(&V_tcbinfo);
1585	return (error);
1586}
1587#endif /* INET */
1588
1589#ifdef INET6
1590static int
1591tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
1592{
1593	struct inpcb *inp = tp->t_inpcb;
1594	int error;
1595
1596	INP_WLOCK_ASSERT(inp);
1597	INP_HASH_WLOCK(&V_tcbinfo);
1598
1599	if (V_tcp_require_unique_port && inp->inp_lport == 0) {
1600		error = in6_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
1601		if (error)
1602			goto out;
1603	}
1604	error = in6_pcbconnect(inp, nam, td->td_ucred);
1605	if (error != 0)
1606		goto out;
1607	INP_HASH_WUNLOCK(&V_tcbinfo);
1608
1609	/* Compute window scaling to request.  */
1610	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
1611	    (TCP_MAXWIN << tp->request_r_scale) < sb_max)
1612		tp->request_r_scale++;
1613
1614	soisconnecting(inp->inp_socket);
1615	TCPSTAT_INC(tcps_connattempt);
1616	tcp_state_change(tp, TCPS_SYN_SENT);
1617	tp->iss = tcp_new_isn(&inp->inp_inc);
1618	if (tp->t_flags & TF_REQ_TSTMP)
1619		tp->ts_offset = tcp_new_ts_offset(&inp->inp_inc);
1620	tcp_sendseqinit(tp);
1621
1622	return (0);
1623
1624out:
1625	INP_HASH_WUNLOCK(&V_tcbinfo);
1626	return (error);
1627}
1628#endif /* INET6 */
1629
1630/*
1631 * Export TCP internal state information via a struct tcp_info, based on the
1632 * Linux 2.6 API.  Not ABI compatible as our constants are mapped differently
1633 * (TCP state machine, etc).  We export all information using FreeBSD-native
1634 * constants -- for example, the numeric values for tcpi_state will differ
1635 * from Linux.
1636 */
1637static void
1638tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
1639{
1640
1641	INP_WLOCK_ASSERT(tp->t_inpcb);
1642	bzero(ti, sizeof(*ti));
1643
1644	ti->tcpi_state = tp->t_state;
1645	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
1646		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
1647	if (tp->t_flags & TF_SACK_PERMIT)
1648		ti->tcpi_options |= TCPI_OPT_SACK;
1649	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
1650		ti->tcpi_options |= TCPI_OPT_WSCALE;
1651		ti->tcpi_snd_wscale = tp->snd_scale;
1652		ti->tcpi_rcv_wscale = tp->rcv_scale;
1653	}
1654	if (tp->t_flags2 & TF2_ECN_PERMIT)
1655		ti->tcpi_options |= TCPI_OPT_ECN;
1656
1657	ti->tcpi_rto = tp->t_rxtcur * tick;
1658	ti->tcpi_last_data_recv = ((uint32_t)ticks - tp->t_rcvtime) * tick;
1659	ti->tcpi_rtt = ((u_int64_t)tp->t_srtt * tick) >> TCP_RTT_SHIFT;
1660	ti->tcpi_rttvar = ((u_int64_t)tp->t_rttvar * tick) >> TCP_RTTVAR_SHIFT;
1661
1662	ti->tcpi_snd_ssthresh = tp->snd_ssthresh;
1663	ti->tcpi_snd_cwnd = tp->snd_cwnd;
1664
1665	/*
1666	 * FreeBSD-specific extension fields for tcp_info.
1667	 */
1668	ti->tcpi_rcv_space = tp->rcv_wnd;
1669	ti->tcpi_rcv_nxt = tp->rcv_nxt;
1670	ti->tcpi_snd_wnd = tp->snd_wnd;
1671	ti->tcpi_snd_bwnd = 0;		/* Unused, kept for compat. */
1672	ti->tcpi_snd_nxt = tp->snd_nxt;
1673	ti->tcpi_snd_mss = tp->t_maxseg;
1674	ti->tcpi_rcv_mss = tp->t_maxseg;
1675	ti->tcpi_snd_rexmitpack = tp->t_sndrexmitpack;
1676	ti->tcpi_rcv_ooopack = tp->t_rcvoopack;
1677	ti->tcpi_snd_zerowin = tp->t_sndzerowin;
1678#ifdef TCP_OFFLOAD
1679	if (tp->t_flags & TF_TOE) {
1680		ti->tcpi_options |= TCPI_OPT_TOE;
1681		tcp_offload_tcp_info(tp, ti);
1682	}
1683#endif
1684}
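/*
 * For reference, the structure filled in above is what a userland caller
 * retrieves with the TCP_INFO socket option; a minimal sketch (error
 * handling elided):
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(s, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("state %u cwnd %u\n", ti.tcpi_state, ti.tcpi_snd_cwnd);
 *
 * Note that tcpi_state carries FreeBSD TCPS_* values, not the Linux
 * numbering.
 */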
1685
1686/*
1687 * tcp_ctloutput() must drop the inpcb lock before performing copyin on
1688 * socket option arguments.  When it re-acquires the lock after the copy, it
1689 * has to revalidate that the connection is still valid for the socket
1690 * option.
1691 */
1692#define INP_WLOCK_RECHECK_CLEANUP(inp, cleanup) do {			\
1693	INP_WLOCK(inp);							\
1694	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {		\
1695		INP_WUNLOCK(inp);					\
1696		cleanup;						\
1697		return (ECONNRESET);					\
1698	}								\
1699	tp = intotcpcb(inp);						\
1700} while (0)
1701#define INP_WLOCK_RECHECK(inp) INP_WLOCK_RECHECK_CLEANUP((inp), /* noop */)
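/*
 * The typical shape of an option handler built on the macros above,
 * condensed from the handlers in tcp_default_ctloutput() below
 * (illustrative only):
 *
 *	INP_WUNLOCK(inp);
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *	if (error)
 *		return (error);
 *	INP_WLOCK_RECHECK(inp);
 *	... modify tp while the inpcb lock is held ...
 *	INP_WUNLOCK(inp);
 */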
1702
1703int
1704tcp_ctloutput(struct socket *so, struct sockopt *sopt)
1705{
1706	int	error;
1707	struct	inpcb *inp;
1708	struct	tcpcb *tp;
1709	struct tcp_function_block *blk;
1710	struct tcp_function_set fsn;
1711
1712	error = 0;
1713	inp = sotoinpcb(so);
1714	KASSERT(inp != NULL, ("tcp_ctloutput: inp == NULL"));
1715	if (sopt->sopt_level != IPPROTO_TCP) {
1716#ifdef INET6
1717		if (inp->inp_vflag & INP_IPV6PROTO) {
1718			error = ip6_ctloutput(so, sopt);
1719			/*
1720			 * In case of the IPV6_USE_MIN_MTU socket option,
1721			 * the INC_IPV6MINMTU flag is set to announce a
1722			 * corresponding MSS during the initial handshake.
1723			 * If the TCP connection is not in the front states,
1724			 * just reduce the MSS being used.
1725			 * This avoids the sending of TCP segments which will
1726			 * be fragmented at the IPv6 layer.
1727			 */
1728			if ((error == 0) &&
1729			    (sopt->sopt_dir == SOPT_SET) &&
1730			    (sopt->sopt_level == IPPROTO_IPV6) &&
1731			    (sopt->sopt_name == IPV6_USE_MIN_MTU)) {
1732				INP_WLOCK(inp);
1733				if ((inp->inp_flags &
1734				    (INP_TIMEWAIT | INP_DROPPED))) {
1735					INP_WUNLOCK(inp);
1736					return (ECONNRESET);
1737				}
1738				inp->inp_inc.inc_flags |= INC_IPV6MINMTU;
1739				tp = intotcpcb(inp);
1740				if ((tp->t_state >= TCPS_SYN_SENT) &&
1741				    (inp->inp_inc.inc_flags & INC_ISIPV6)) {
1742					struct ip6_pktopts *opt;
1743
1744					opt = inp->in6p_outputopts;
1745					if ((opt != NULL) &&
1746					    (opt->ip6po_minmtu ==
1747					    IP6PO_MINMTU_ALL)) {
1748						if (tp->t_maxseg > TCP6_MSS) {
1749							tp->t_maxseg = TCP6_MSS;
1750						}
1751					}
1752				}
1753				INP_WUNLOCK(inp);
1754			}
1755		}
1756#endif /* INET6 */
1757#if defined(INET6) && defined(INET)
1758		else
1759#endif
1760#ifdef INET
1761		{
1762			error = ip_ctloutput(so, sopt);
1763		}
1764#endif
1765		return (error);
1766	}
1767	INP_WLOCK(inp);
1768	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
1769		INP_WUNLOCK(inp);
1770		return (ECONNRESET);
1771	}
1772	tp = intotcpcb(inp);
1773	/*
1774	 * Protect the TCP option TCP_FUNCTION_BLK so
1775	 * that a sub-function can *never* overwrite this.
1776	 */
1777	if ((sopt->sopt_dir == SOPT_SET) &&
1778	    (sopt->sopt_name == TCP_FUNCTION_BLK)) {
1779		INP_WUNLOCK(inp);
1780		error = sooptcopyin(sopt, &fsn, sizeof fsn,
1781		    sizeof fsn);
1782		if (error)
1783			return (error);
1784		INP_WLOCK_RECHECK(inp);
1785		blk = find_and_ref_tcp_functions(&fsn);
1786		if (blk == NULL) {
1787			INP_WUNLOCK(inp);
1788			return (ENOENT);
1789		}
1790		if (tp->t_fb == blk) {
1791			/* You already have this */
1792			refcount_release(&blk->tfb_refcnt);
1793			INP_WUNLOCK(inp);
1794			return (0);
1795		}
1796		if (tp->t_state != TCPS_CLOSED) {
1797			/*
1798			 * The user has advanced the state
1799			 * past the initial point, so we may not
1800			 * be able to switch.
1801			 */
1802			if (blk->tfb_tcp_handoff_ok != NULL) {
1803				/*
1804				 * Does the stack provide a query
1805				 * mechanism?  If so, switching may
1806				 * still be possible.
1807				 */
1808				error = (*blk->tfb_tcp_handoff_ok)(tp);
1809			} else
1810				error = EINVAL;
1811			if (error) {
1812				refcount_release(&blk->tfb_refcnt);
1813				INP_WUNLOCK(inp);
1814				return(error);
1815			}
1816		}
1817		if (blk->tfb_flags & TCP_FUNC_BEING_REMOVED) {
1818			refcount_release(&blk->tfb_refcnt);
1819			INP_WUNLOCK(inp);
1820			return (ENOENT);
1821		}
1822		/*
1823		 * Release the old refcnt, the
1824		 * lookup acquired a ref on the
1825		 * new one already.
1826		 */
1827		if (tp->t_fb->tfb_tcp_fb_fini) {
1828			/*
1829			 * Tell the stack to clean up; the 0 argument
1830			 * means the tcb is not going away.
1831			 */
1832			(*tp->t_fb->tfb_tcp_fb_fini)(tp, 0);
1833		}
1834#ifdef TCPHPTS
1835		/* Ensure that we are not on any hpts. */
1836		tcp_hpts_remove(tp->t_inpcb, HPTS_REMOVE_ALL);
1837#endif
1838		if (blk->tfb_tcp_fb_init) {
1839			error = (*blk->tfb_tcp_fb_init)(tp);
1840			if (error) {
1841				refcount_release(&blk->tfb_refcnt);
1842				if (tp->t_fb->tfb_tcp_fb_init) {
1843					if ((*tp->t_fb->tfb_tcp_fb_init)(tp) != 0) {
1844						/* Fallback failed, drop the connection. */
1845						INP_WUNLOCK(inp);
1846						soabort(so);
1847						return(error);
1848					}
1849				}
1850				goto err_out;
1851			}
1852		}
1853		refcount_release(&tp->t_fb->tfb_refcnt);
1854		tp->t_fb = blk;
1855#ifdef TCP_OFFLOAD
1856		if (tp->t_flags & TF_TOE) {
1857			tcp_offload_ctloutput(tp, sopt->sopt_dir,
1858			     sopt->sopt_name);
1859		}
1860#endif
1861err_out:
1862		INP_WUNLOCK(inp);
1863		return (error);
1864	} else if ((sopt->sopt_dir == SOPT_GET) &&
1865	    (sopt->sopt_name == TCP_FUNCTION_BLK)) {
1866		strncpy(fsn.function_set_name, tp->t_fb->tfb_tcp_block_name,
1867		    TCP_FUNCTION_NAME_LEN_MAX);
1868		fsn.function_set_name[TCP_FUNCTION_NAME_LEN_MAX - 1] = '\0';
1869		fsn.pcbcnt = tp->t_fb->tfb_refcnt;
1870		INP_WUNLOCK(inp);
1871		error = sooptcopyout(sopt, &fsn, sizeof fsn);
1872		return (error);
1873	}
1874	/* Pass in the INP locked; the callee must unlock it. */
1875	return (tp->t_fb->tfb_tcp_ctloutput(so, sopt, inp, tp));
1876}
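/*
 * For reference, selecting an alternate TCP stack from userland goes through
 * the TCP_FUNCTION_BLK handling above; a minimal sketch, where the stack
 * name "rack" is only an example and has to match a stack that is actually
 * loaded:
 *
 *	struct tcp_function_set fsn;
 *
 *	memset(&fsn, 0, sizeof(fsn));
 *	strlcpy(fsn.function_set_name, "rack", sizeof(fsn.function_set_name));
 *	if (setsockopt(s, IPPROTO_TCP, TCP_FUNCTION_BLK, &fsn,
 *	    sizeof(fsn)) == -1)
 *		warn("TCP_FUNCTION_BLK");
 *
 * The same option with getsockopt() returns the name of the stack currently
 * attached to the connection.
 */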
1877
1878/*
1879 * If this assert becomes untrue, we need to change the size of the buf
1880 * variable in tcp_default_ctloutput().
1881 */
1882#ifdef CTASSERT
1883CTASSERT(TCP_CA_NAME_MAX <= TCP_LOG_ID_LEN);
1884CTASSERT(TCP_LOG_REASON_LEN <= TCP_LOG_ID_LEN);
1885#endif
1886
1887#ifdef KERN_TLS
1888static int
1889copyin_tls_enable(struct sockopt *sopt, struct tls_enable *tls)
1890{
1891	struct tls_enable_v0 tls_v0;
1892	int error;
1893
1894	if (sopt->sopt_valsize == sizeof(tls_v0)) {
1895		error = sooptcopyin(sopt, &tls_v0, sizeof(tls_v0),
1896		    sizeof(tls_v0));
1897		if (error)
1898			return (error);
1899		memset(tls, 0, sizeof(*tls));
1900		tls->cipher_key = tls_v0.cipher_key;
1901		tls->iv = tls_v0.iv;
1902		tls->auth_key = tls_v0.auth_key;
1903		tls->cipher_algorithm = tls_v0.cipher_algorithm;
1904		tls->cipher_key_len = tls_v0.cipher_key_len;
1905		tls->iv_len = tls_v0.iv_len;
1906		tls->auth_algorithm = tls_v0.auth_algorithm;
1907		tls->auth_key_len = tls_v0.auth_key_len;
1908		tls->flags = tls_v0.flags;
1909		tls->tls_vmajor = tls_v0.tls_vmajor;
1910		tls->tls_vminor = tls_v0.tls_vminor;
1911		return (0);
1912	}
1913
1914	return (sooptcopyin(sopt, tls, sizeof(*tls), sizeof(*tls)));
1915}
1916#endif
1917
1918int
1919tcp_default_ctloutput(struct socket *so, struct sockopt *sopt, struct inpcb *inp, struct tcpcb *tp)
1920{
1921	int	error, opt, optval;
1922	u_int	ui;
1923	struct	tcp_info ti;
1924#ifdef KERN_TLS
1925	struct tls_enable tls;
1926#endif
1927	struct cc_algo *algo;
1928	char	*pbuf, buf[TCP_LOG_ID_LEN];
1929#ifdef STATS
1930	struct statsblob *sbp;
1931#endif
1932	size_t	len;
1933
1934	/*
1935	 * For TCP_CCALGOOPT forward the control to CC module, for both
1936	 * SOPT_SET and SOPT_GET.
1937	 */
1938	switch (sopt->sopt_name) {
1939	case TCP_CCALGOOPT:
1940		INP_WUNLOCK(inp);
1941		if (sopt->sopt_valsize > CC_ALGOOPT_LIMIT)
1942			return (EINVAL);
1943		pbuf = malloc(sopt->sopt_valsize, M_TEMP, M_WAITOK | M_ZERO);
1944		error = sooptcopyin(sopt, pbuf, sopt->sopt_valsize,
1945		    sopt->sopt_valsize);
1946		if (error) {
1947			free(pbuf, M_TEMP);
1948			return (error);
1949		}
1950		INP_WLOCK_RECHECK_CLEANUP(inp, free(pbuf, M_TEMP));
1951		if (CC_ALGO(tp)->ctl_output != NULL)
1952			error = CC_ALGO(tp)->ctl_output(tp->ccv, sopt, pbuf);
1953		else
1954			error = ENOENT;
1955		INP_WUNLOCK(inp);
1956		if (error == 0 && sopt->sopt_dir == SOPT_GET)
1957			error = sooptcopyout(sopt, pbuf, sopt->sopt_valsize);
1958		free(pbuf, M_TEMP);
1959		return (error);
1960	}
1961
1962	switch (sopt->sopt_dir) {
1963	case SOPT_SET:
1964		switch (sopt->sopt_name) {
1965#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
1966		case TCP_MD5SIG:
1967			if (!TCPMD5_ENABLED()) {
1968				INP_WUNLOCK(inp);
1969				return (ENOPROTOOPT);
1970			}
1971			error = TCPMD5_PCBCTL(inp, sopt);
1972			if (error)
1973				return (error);
1974			goto unlock_and_done;
1975#endif /* IPSEC */
1976
1977		case TCP_NODELAY:
1978		case TCP_NOOPT:
1979			INP_WUNLOCK(inp);
1980			error = sooptcopyin(sopt, &optval, sizeof optval,
1981			    sizeof optval);
1982			if (error)
1983				return (error);
1984
1985			INP_WLOCK_RECHECK(inp);
1986			switch (sopt->sopt_name) {
1987			case TCP_NODELAY:
1988				opt = TF_NODELAY;
1989				break;
1990			case TCP_NOOPT:
1991				opt = TF_NOOPT;
1992				break;
1993			default:
1994				opt = 0; /* dead code to fool gcc */
1995				break;
1996			}
1997
1998			if (optval)
1999				tp->t_flags |= opt;
2000			else
2001				tp->t_flags &= ~opt;
2002unlock_and_done:
2003#ifdef TCP_OFFLOAD
2004			if (tp->t_flags & TF_TOE) {
2005				tcp_offload_ctloutput(tp, sopt->sopt_dir,
2006				    sopt->sopt_name);
2007			}
2008#endif
2009			INP_WUNLOCK(inp);
2010			break;
2011
2012		case TCP_NOPUSH:
2013			INP_WUNLOCK(inp);
2014			error = sooptcopyin(sopt, &optval, sizeof optval,
2015			    sizeof optval);
2016			if (error)
2017				return (error);
2018
2019			INP_WLOCK_RECHECK(inp);
2020			if (optval)
2021				tp->t_flags |= TF_NOPUSH;
2022			else if (tp->t_flags & TF_NOPUSH) {
2023				tp->t_flags &= ~TF_NOPUSH;
2024				if (TCPS_HAVEESTABLISHED(tp->t_state)) {
2025					struct epoch_tracker et;
2026
2027					NET_EPOCH_ENTER(et);
2028					error = tp->t_fb->tfb_tcp_output(tp);
2029					NET_EPOCH_EXIT(et);
2030				}
2031			}
2032			goto unlock_and_done;
2033
2034		case TCP_REMOTE_UDP_ENCAPS_PORT:
2035			INP_WUNLOCK(inp);
2036			error = sooptcopyin(sopt, &optval, sizeof optval,
2037			    sizeof optval);
2038			if (error)
2039				return (error);
2040			if ((optval < TCP_TUNNELING_PORT_MIN) ||
2041			    (optval > TCP_TUNNELING_PORT_MAX)) {
2042				/* The value must be within the tunneling port range. */
2043				return (EINVAL);
2044			}
2045			if ((V_tcp_udp_tunneling_port == 0) && (optval != 0)) {
2046				/* A UDP tunneling port must be enabled first. */
2047				return (EINVAL);
2048			}
2049			INP_WLOCK_RECHECK(inp);
2050			if (tp->t_state != TCPS_CLOSED) {
2051				/* The port cannot be changed once connected. */
2052				error = EINVAL;
2053			} else {
2054				/* All checks passed; record the port. */
2055				tp->t_port = htons(optval);
2056			}
2057			goto unlock_and_done;
2058
2059		case TCP_MAXSEG:
2060			INP_WUNLOCK(inp);
2061			error = sooptcopyin(sopt, &optval, sizeof optval,
2062			    sizeof optval);
2063			if (error)
2064				return (error);
2065
2066			INP_WLOCK_RECHECK(inp);
2067			if (optval > 0 && optval <= tp->t_maxseg &&
2068			    optval + 40 >= V_tcp_minmss)
2069				tp->t_maxseg = optval;
2070			else
2071				error = EINVAL;
2072			goto unlock_and_done;
2073
2074		case TCP_INFO:
2075			INP_WUNLOCK(inp);
2076			error = EINVAL;
2077			break;
2078
2079		case TCP_STATS:
2080			INP_WUNLOCK(inp);
2081#ifdef STATS
2082			error = sooptcopyin(sopt, &optval, sizeof optval,
2083			    sizeof optval);
2084			if (error)
2085				return (error);
2086
2087			if (optval > 0)
2088				sbp = stats_blob_alloc(
2089				    V_tcp_perconn_stats_dflt_tpl, 0);
2090			else
2091				sbp = NULL;
2092
2093			INP_WLOCK_RECHECK(inp);
2094			if ((tp->t_stats != NULL && sbp == NULL) ||
2095			    (tp->t_stats == NULL && sbp != NULL)) {
2096				struct statsblob *t = tp->t_stats;
2097				tp->t_stats = sbp;
2098				sbp = t;
2099			}
2100			INP_WUNLOCK(inp);
2101
2102			stats_blob_destroy(sbp);
2103#else
2104			return (EOPNOTSUPP);
2105#endif /* !STATS */
2106			break;
2107
2108		case TCP_CONGESTION:
2109			INP_WUNLOCK(inp);
2110			error = sooptcopyin(sopt, buf, TCP_CA_NAME_MAX - 1, 1);
2111			if (error)
2112				break;
2113			buf[sopt->sopt_valsize] = '\0';
2114			INP_WLOCK_RECHECK(inp);
2115			CC_LIST_RLOCK();
2116			STAILQ_FOREACH(algo, &cc_list, entries)
2117				if (strncmp(buf, algo->name,
2118				    TCP_CA_NAME_MAX) == 0)
2119					break;
2120			CC_LIST_RUNLOCK();
2121			if (algo == NULL) {
2122				INP_WUNLOCK(inp);
2123				error = EINVAL;
2124				break;
2125			}
2126			/*
2127			 * We hold a write lock over the tcb so it's safe to
2128			 * do these things without ordering concerns.
2129			 */
2130			if (CC_ALGO(tp)->cb_destroy != NULL)
2131				CC_ALGO(tp)->cb_destroy(tp->ccv);
2132			CC_DATA(tp) = NULL;
2133			CC_ALGO(tp) = algo;
2134			/*
2135			 * If something goes pear shaped initialising the new
2136			 * algo, fall back to newreno (which does not
2137			 * require initialisation).
2138			 */
2139			if (algo->cb_init != NULL &&
2140			    algo->cb_init(tp->ccv) != 0) {
2141				CC_ALGO(tp) = &newreno_cc_algo;
2142				/*
2143				 * The only reason init should fail is
2144				 * a malloc failure.
2145				 */
2146				error = ENOMEM;
2147			}
2148			INP_WUNLOCK(inp);
2149			break;
2150
2151		case TCP_REUSPORT_LB_NUMA:
2152			INP_WUNLOCK(inp);
2153			error = sooptcopyin(sopt, &optval, sizeof(optval),
2154			    sizeof(optval));
2155			INP_WLOCK_RECHECK(inp);
2156			if (!error)
2157				error = in_pcblbgroup_numa(inp, optval);
2158			INP_WUNLOCK(inp);
2159			break;
2160
2161#ifdef KERN_TLS
2162		case TCP_TXTLS_ENABLE:
2163			INP_WUNLOCK(inp);
2164			error = copyin_tls_enable(sopt, &tls);
2165			if (error)
2166				break;
2167			error = ktls_enable_tx(so, &tls);
2168			break;
2169		case TCP_TXTLS_MODE:
2170			INP_WUNLOCK(inp);
2171			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
2172			if (error)
2173				return (error);
2174
2175			INP_WLOCK_RECHECK(inp);
2176			error = ktls_set_tx_mode(so, ui);
2177			INP_WUNLOCK(inp);
2178			break;
2179		case TCP_RXTLS_ENABLE:
2180			INP_WUNLOCK(inp);
2181			error = sooptcopyin(sopt, &tls, sizeof(tls),
2182			    sizeof(tls));
2183			if (error)
2184				break;
2185			error = ktls_enable_rx(so, &tls);
2186			break;
2187#endif
2188
2189		case TCP_KEEPIDLE:
2190		case TCP_KEEPINTVL:
2191		case TCP_KEEPINIT:
2192			INP_WUNLOCK(inp);
2193			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
2194			if (error)
2195				return (error);
2196
2197			if (ui > (UINT_MAX / hz)) {
2198				error = EINVAL;
2199				break;
2200			}
2201			ui *= hz;
2202
2203			INP_WLOCK_RECHECK(inp);
2204			switch (sopt->sopt_name) {
2205			case TCP_KEEPIDLE:
2206				tp->t_keepidle = ui;
2207				/*
2208				 * XXX: better check current remaining
2209				 * timeout and "merge" it with new value.
2210				 */
2211				if ((tp->t_state > TCPS_LISTEN) &&
2212				    (tp->t_state <= TCPS_CLOSING))
2213					tcp_timer_activate(tp, TT_KEEP,
2214					    TP_KEEPIDLE(tp));
2215				break;
2216			case TCP_KEEPINTVL:
2217				tp->t_keepintvl = ui;
2218				if ((tp->t_state == TCPS_FIN_WAIT_2) &&
2219				    (TP_MAXIDLE(tp) > 0))
2220					tcp_timer_activate(tp, TT_2MSL,
2221					    TP_MAXIDLE(tp));
2222				break;
2223			case TCP_KEEPINIT:
2224				tp->t_keepinit = ui;
2225				if (tp->t_state == TCPS_SYN_RECEIVED ||
2226				    tp->t_state == TCPS_SYN_SENT)
2227					tcp_timer_activate(tp, TT_KEEP,
2228					    TP_KEEPINIT(tp));
2229				break;
2230			}
2231			goto unlock_and_done;
2232
2233		case TCP_KEEPCNT:
2234			INP_WUNLOCK(inp);
2235			error = sooptcopyin(sopt, &ui, sizeof(ui), sizeof(ui));
2236			if (error)
2237				return (error);
2238
2239			INP_WLOCK_RECHECK(inp);
2240			tp->t_keepcnt = ui;
2241			if ((tp->t_state == TCPS_FIN_WAIT_2) &&
2242			    (TP_MAXIDLE(tp) > 0))
2243				tcp_timer_activate(tp, TT_2MSL,
2244				    TP_MAXIDLE(tp));
2245			goto unlock_and_done;
2246
2247#ifdef TCPPCAP
2248		case TCP_PCAP_OUT:
2249		case TCP_PCAP_IN:
2250			INP_WUNLOCK(inp);
2251			error = sooptcopyin(sopt, &optval, sizeof optval,
2252			    sizeof optval);
2253			if (error)
2254				return (error);
2255
2256			INP_WLOCK_RECHECK(inp);
2257			if (optval >= 0)
2258				tcp_pcap_set_sock_max(
2259				    (sopt->sopt_name == TCP_PCAP_OUT) ?
2260				    &(tp->t_outpkts) : &(tp->t_inpkts), optval);
2261			else
2262				error = EINVAL;
2263			goto unlock_and_done;
2264#endif
2265
2266		case TCP_FASTOPEN: {
2267			struct tcp_fastopen tfo_optval;
2268
2269			INP_WUNLOCK(inp);
2270			if (!V_tcp_fastopen_client_enable &&
2271			    !V_tcp_fastopen_server_enable)
2272				return (EPERM);
2273
2274			error = sooptcopyin(sopt, &tfo_optval,
2275				    sizeof(tfo_optval), sizeof(int));
2276			if (error)
2277				return (error);
2278
2279			INP_WLOCK_RECHECK(inp);
2280			if ((tp->t_state != TCPS_CLOSED) &&
2281			    (tp->t_state != TCPS_LISTEN)) {
2282				error = EINVAL;
2283				goto unlock_and_done;
2284			}
2285			if (tfo_optval.enable) {
2286				if (tp->t_state == TCPS_LISTEN) {
2287					if (!V_tcp_fastopen_server_enable) {
2288						error = EPERM;
2289						goto unlock_and_done;
2290					}
2291
2292					if (tp->t_tfo_pending == NULL)
2293						tp->t_tfo_pending =
2294						    tcp_fastopen_alloc_counter();
2295				} else {
2296					/*
2297					 * If a pre-shared key was provided,
2298					 * stash it in the client cookie
2299					 * field of the tcpcb for use during
2300					 * connect.
2301					 */
2302					if (sopt->sopt_valsize ==
2303					    sizeof(tfo_optval)) {
2304						memcpy(tp->t_tfo_cookie.client,
2305						       tfo_optval.psk,
2306						       TCP_FASTOPEN_PSK_LEN);
2307						tp->t_tfo_client_cookie_len =
2308						    TCP_FASTOPEN_PSK_LEN;
2309					}
2310				}
2311				tp->t_flags |= TF_FASTOPEN;
2312			} else
2313				tp->t_flags &= ~TF_FASTOPEN;
2314			goto unlock_and_done;
2315		}
2316
2317#ifdef TCP_BLACKBOX
2318		case TCP_LOG:
2319			INP_WUNLOCK(inp);
2320			error = sooptcopyin(sopt, &optval, sizeof optval,
2321			    sizeof optval);
2322			if (error)
2323				return (error);
2324
2325			INP_WLOCK_RECHECK(inp);
2326			error = tcp_log_state_change(tp, optval);
2327			goto unlock_and_done;
2328
2329		case TCP_LOGBUF:
2330			INP_WUNLOCK(inp);
2331			error = EINVAL;
2332			break;
2333
2334		case TCP_LOGID:
2335			INP_WUNLOCK(inp);
2336			error = sooptcopyin(sopt, buf, TCP_LOG_ID_LEN - 1, 0);
2337			if (error)
2338				break;
2339			buf[sopt->sopt_valsize] = '\0';
2340			INP_WLOCK_RECHECK(inp);
2341			error = tcp_log_set_id(tp, buf);
2342			/* tcp_log_set_id() unlocks the INP. */
2343			break;
2344
2345		case TCP_LOGDUMP:
2346		case TCP_LOGDUMPID:
2347			INP_WUNLOCK(inp);
2348			error =
2349			    sooptcopyin(sopt, buf, TCP_LOG_REASON_LEN - 1, 0);
2350			if (error)
2351				break;
2352			buf[sopt->sopt_valsize] = '\0';
2353			INP_WLOCK_RECHECK(inp);
2354			if (sopt->sopt_name == TCP_LOGDUMP) {
2355				error = tcp_log_dump_tp_logbuf(tp, buf,
2356				    M_WAITOK, true);
2357				INP_WUNLOCK(inp);
2358			} else {
2359				tcp_log_dump_tp_bucket_logbufs(tp, buf);
2360				/*
2361				 * tcp_log_dump_tp_bucket_logbufs() drops the
2362				 * INP lock.
2363				 */
2364			}
2365			break;
2366#endif
2367
2368		default:
2369			INP_WUNLOCK(inp);
2370			error = ENOPROTOOPT;
2371			break;
2372		}
2373		break;
2374
2375	case SOPT_GET:
2376		tp = intotcpcb(inp);
2377		switch (sopt->sopt_name) {
2378#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
2379		case TCP_MD5SIG:
2380			if (!TCPMD5_ENABLED()) {
2381				INP_WUNLOCK(inp);
2382				return (ENOPROTOOPT);
2383			}
2384			error = TCPMD5_PCBCTL(inp, sopt);
2385			break;
2386#endif
2387
2388		case TCP_NODELAY:
2389			optval = tp->t_flags & TF_NODELAY;
2390			INP_WUNLOCK(inp);
2391			error = sooptcopyout(sopt, &optval, sizeof optval);
2392			break;
2393		case TCP_MAXSEG:
2394			optval = tp->t_maxseg;
2395			INP_WUNLOCK(inp);
2396			error = sooptcopyout(sopt, &optval, sizeof optval);
2397			break;
2398		case TCP_REMOTE_UDP_ENCAPS_PORT:
2399			optval = ntohs(tp->t_port);
2400			INP_WUNLOCK(inp);
2401			error = sooptcopyout(sopt, &optval, sizeof optval);
2402			break;
2403		case TCP_NOOPT:
2404			optval = tp->t_flags & TF_NOOPT;
2405			INP_WUNLOCK(inp);
2406			error = sooptcopyout(sopt, &optval, sizeof optval);
2407			break;
2408		case TCP_NOPUSH:
2409			optval = tp->t_flags & TF_NOPUSH;
2410			INP_WUNLOCK(inp);
2411			error = sooptcopyout(sopt, &optval, sizeof optval);
2412			break;
2413		case TCP_INFO:
2414			tcp_fill_info(tp, &ti);
2415			INP_WUNLOCK(inp);
2416			error = sooptcopyout(sopt, &ti, sizeof ti);
2417			break;
2418		case TCP_STATS:
2419			{
2420#ifdef STATS
2421			int nheld;
2422			TYPEOF_MEMBER(struct statsblob, flags) sbflags = 0;
2423
2424			error = 0;
2425			socklen_t outsbsz = sopt->sopt_valsize;
2426			if (tp->t_stats == NULL)
2427				error = ENOENT;
2428			else if (outsbsz >= tp->t_stats->cursz)
2429				outsbsz = tp->t_stats->cursz;
2430			else if (outsbsz >= sizeof(struct statsblob))
2431				outsbsz = sizeof(struct statsblob);
2432			else
2433				error = EINVAL;
2434			INP_WUNLOCK(inp);
2435			if (error)
2436				break;
2437
2438			sbp = sopt->sopt_val;
2439			nheld = atop(round_page(((vm_offset_t)sbp) +
2440			    (vm_size_t)outsbsz) - trunc_page((vm_offset_t)sbp));
2441			vm_page_t ma[nheld];
2442			if (vm_fault_quick_hold_pages(
2443			    &curproc->p_vmspace->vm_map, (vm_offset_t)sbp,
2444			    outsbsz, VM_PROT_READ | VM_PROT_WRITE, ma,
2445			    nheld) < 0) {
2446				error = EFAULT;
2447				break;
2448			}
2449
2450			if ((error = copyin_nofault(&(sbp->flags), &sbflags,
2451			    SIZEOF_MEMBER(struct statsblob, flags))))
2452				goto unhold;
2453
2454			INP_WLOCK_RECHECK(inp);
2455			error = stats_blob_snapshot(&sbp, outsbsz, tp->t_stats,
2456			    sbflags | SB_CLONE_USRDSTNOFAULT);
2457			INP_WUNLOCK(inp);
2458			sopt->sopt_valsize = outsbsz;
2459unhold:
2460			vm_page_unhold_pages(ma, nheld);
2461#else
2462			INP_WUNLOCK(inp);
2463			error = EOPNOTSUPP;
2464#endif /* !STATS */
2465			break;
2466			}
2467		case TCP_CONGESTION:
2468			len = strlcpy(buf, CC_ALGO(tp)->name, TCP_CA_NAME_MAX);
2469			INP_WUNLOCK(inp);
2470			error = sooptcopyout(sopt, buf, len + 1);
2471			break;
2472		case TCP_KEEPIDLE:
2473		case TCP_KEEPINTVL:
2474		case TCP_KEEPINIT:
2475		case TCP_KEEPCNT:
2476			switch (sopt->sopt_name) {
2477			case TCP_KEEPIDLE:
2478				ui = TP_KEEPIDLE(tp) / hz;
2479				break;
2480			case TCP_KEEPINTVL:
2481				ui = TP_KEEPINTVL(tp) / hz;
2482				break;
2483			case TCP_KEEPINIT:
2484				ui = TP_KEEPINIT(tp) / hz;
2485				break;
2486			case TCP_KEEPCNT:
2487				ui = TP_KEEPCNT(tp);
2488				break;
2489			}
2490			INP_WUNLOCK(inp);
2491			error = sooptcopyout(sopt, &ui, sizeof(ui));
2492			break;
2493#ifdef TCPPCAP
2494		case TCP_PCAP_OUT:
2495		case TCP_PCAP_IN:
2496			optval = tcp_pcap_get_sock_max((sopt->sopt_name ==
2497			    TCP_PCAP_OUT) ? &(tp->t_outpkts) : &(tp->t_inpkts));
2498			INP_WUNLOCK(inp);
2499			error = sooptcopyout(sopt, &optval, sizeof optval);
2500			break;
2501#endif
2502		case TCP_FASTOPEN:
2503			optval = tp->t_flags & TF_FASTOPEN;
2504			INP_WUNLOCK(inp);
2505			error = sooptcopyout(sopt, &optval, sizeof optval);
2506			break;
2507#ifdef TCP_BLACKBOX
2508		case TCP_LOG:
2509			optval = tp->t_logstate;
2510			INP_WUNLOCK(inp);
2511			error = sooptcopyout(sopt, &optval, sizeof(optval));
2512			break;
2513		case TCP_LOGBUF:
2514			/* tcp_log_getlogbuf() does INP_WUNLOCK(inp) */
2515			error = tcp_log_getlogbuf(sopt, tp);
2516			break;
2517		case TCP_LOGID:
2518			len = tcp_log_get_id(tp, buf);
2519			INP_WUNLOCK(inp);
2520			error = sooptcopyout(sopt, buf, len + 1);
2521			break;
2522		case TCP_LOGDUMP:
2523		case TCP_LOGDUMPID:
2524			INP_WUNLOCK(inp);
2525			error = EINVAL;
2526			break;
2527#endif
2528#ifdef KERN_TLS
2529		case TCP_TXTLS_MODE:
2530			optval = ktls_get_tx_mode(so);
2531			INP_WUNLOCK(inp);
2532			error = sooptcopyout(sopt, &optval, sizeof(optval));
2533			break;
2534		case TCP_RXTLS_MODE:
2535			optval = ktls_get_rx_mode(so);
2536			INP_WUNLOCK(inp);
2537			error = sooptcopyout(sopt, &optval, sizeof(optval));
2538			break;
2539#endif
2540		default:
2541			INP_WUNLOCK(inp);
2542			error = ENOPROTOOPT;
2543			break;
2544		}
2545		break;
2546	}
2547	return (error);
2548}
2549#undef INP_WLOCK_RECHECK
2550#undef INP_WLOCK_RECHECK_CLEANUP
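
/*
 * Illustrative only: a hedged userland sketch of two of the options handled
 * by tcp_default_ctloutput() above.  TCP_CONGESTION takes a NUL-terminated
 * algorithm name of at most TCP_CA_NAME_MAX bytes, and TCP_KEEPIDLE takes a
 * value in seconds which the kernel converts to ticks.  The socket "fd",
 * the presence of the "cubic" module, SO_KEEPALIVE having been enabled
 * separately, and the helper name are all assumptions.
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <string.h>
 *
 *	static int
 *	tune_tcp_socket(int fd)
 *	{
 *		const char cc[TCP_CA_NAME_MAX] = "cubic";
 *		u_int idle = 30;
 *
 *		if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc,
 *		    strlen(cc) + 1) == -1)
 *			return (-1);
 *		return (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle,
 *		    sizeof(idle)));
 *	}
 */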
2551
2552/*
2553 * Initiate (or continue) disconnect.
2554 * If embryonic state, just send reset (once).
2555 * If in ``let data drain'' option and linger null, just drop.
2556 * Otherwise (hard), mark socket disconnecting and drop
2557 * current input data; switch states based on user close, and
2558 * send segment to peer (with FIN).
2559 */
2560static void
2561tcp_disconnect(struct tcpcb *tp)
2562{
2563	struct inpcb *inp = tp->t_inpcb;
2564	struct socket *so = inp->inp_socket;
2565
2566	NET_EPOCH_ASSERT();
2567	INP_WLOCK_ASSERT(inp);
2568
2569	/*
2570	 * Neither tcp_close() nor tcp_drop() should return NULL, as the
2571	 * socket is still open.
2572	 */
2573	if (tp->t_state < TCPS_ESTABLISHED &&
2574	    !(tp->t_state > TCPS_LISTEN && IS_FASTOPEN(tp->t_flags))) {
2575		tp = tcp_close(tp);
2576		KASSERT(tp != NULL,
2577		    ("tcp_disconnect: tcp_close() returned NULL"));
2578	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
2579		tp = tcp_drop(tp, 0);
2580		KASSERT(tp != NULL,
2581		    ("tcp_disconnect: tcp_drop() returned NULL"));
2582	} else {
2583		soisdisconnecting(so);
2584		sbflush(&so->so_rcv);
2585		tcp_usrclosed(tp);
2586		if (!(inp->inp_flags & INP_DROPPED))
2587			tp->t_fb->tfb_tcp_output(tp);
2588	}
2589}
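
/*
 * Illustrative only: the hard-drop branch above (SO_LINGER set with a zero
 * linger time) is what a userland application reaches when it wants close(2)
 * to discard queued data and reset the connection instead of performing the
 * orderly FIN handshake.  A hedged sketch, assuming a connected socket "fd";
 * the helper name is a placeholder.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static void
 *	abortive_close(int fd)
 *	{
 *		struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *
 *		(void)setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *		(void)close(fd);
 *	}
 */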
2590
2591/*
2592 * User issued close, and wish to trail through shutdown states:
2593 * if never received SYN, just forget it.  If got a SYN from peer,
2594 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
2595 * If already got a FIN from peer, then almost done; go to LAST_ACK
2596 * state.  In all other cases, have already sent FIN to peer (e.g.
2597 * after PRU_SHUTDOWN), and just have to play the tedious game of waiting
2598 * for peer to send FIN or not respond to keep-alives, etc.
2599 * We can let the user exit from the close as soon as the FIN is acked.
2600 */
2601static void
2602tcp_usrclosed(struct tcpcb *tp)
2603{
2604
2605	NET_EPOCH_ASSERT();
2606	INP_WLOCK_ASSERT(tp->t_inpcb);
2607
2608	switch (tp->t_state) {
2609	case TCPS_LISTEN:
2610#ifdef TCP_OFFLOAD
2611		tcp_offload_listen_stop(tp);
2612#endif
2613		tcp_state_change(tp, TCPS_CLOSED);
2614		/* FALLTHROUGH */
2615	case TCPS_CLOSED:
2616		tp = tcp_close(tp);
2617		/*
2618		 * tcp_close() should never return NULL here as the socket is
2619		 * still open.
2620		 */
2621		KASSERT(tp != NULL,
2622		    ("tcp_usrclosed: tcp_close() returned NULL"));
2623		break;
2624
2625	case TCPS_SYN_SENT:
2626	case TCPS_SYN_RECEIVED:
2627		tp->t_flags |= TF_NEEDFIN;
2628		break;
2629
2630	case TCPS_ESTABLISHED:
2631		tcp_state_change(tp, TCPS_FIN_WAIT_1);
2632		break;
2633
2634	case TCPS_CLOSE_WAIT:
2635		tcp_state_change(tp, TCPS_LAST_ACK);
2636		break;
2637	}
2638	if (tp->t_state >= TCPS_FIN_WAIT_2) {
2639		soisdisconnected(tp->t_inpcb->inp_socket);
2640		/* Prevent the connection hanging in FIN_WAIT_2 forever. */
2641		if (tp->t_state == TCPS_FIN_WAIT_2) {
2642			int timeout;
2643
2644			timeout = (tcp_fast_finwait2_recycle) ?
2645			    tcp_finwait2_timeout : TP_MAXIDLE(tp);
2646			tcp_timer_activate(tp, TT_2MSL, timeout);
2647		}
2648	}
2649}
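
/*
 * Note: whether and how quickly FIN_WAIT_2 connections are recycled above is
 * governed by the net.inet.tcp.fast_finwait2_recycle and
 * net.inet.tcp.finwait2_timeout sysctls.  An illustrative invocation from a
 * root shell (the value shown is an example, not the default):
 *
 *	sysctl net.inet.tcp.fast_finwait2_recycle=1
 */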
2650
2651#ifdef DDB
2652static void
2653db_print_indent(int indent)
2654{
2655	int i;
2656
2657	for (i = 0; i < indent; i++)
2658		db_printf(" ");
2659}
2660
2661static void
2662db_print_tstate(int t_state)
2663{
2664
2665	switch (t_state) {
2666	case TCPS_CLOSED:
2667		db_printf("TCPS_CLOSED");
2668		return;
2669
2670	case TCPS_LISTEN:
2671		db_printf("TCPS_LISTEN");
2672		return;
2673
2674	case TCPS_SYN_SENT:
2675		db_printf("TCPS_SYN_SENT");
2676		return;
2677
2678	case TCPS_SYN_RECEIVED:
2679		db_printf("TCPS_SYN_RECEIVED");
2680		return;
2681
2682	case TCPS_ESTABLISHED:
2683		db_printf("TCPS_ESTABLISHED");
2684		return;
2685
2686	case TCPS_CLOSE_WAIT:
2687		db_printf("TCPS_CLOSE_WAIT");
2688		return;
2689
2690	case TCPS_FIN_WAIT_1:
2691		db_printf("TCPS_FIN_WAIT_1");
2692		return;
2693
2694	case TCPS_CLOSING:
2695		db_printf("TCPS_CLOSING");
2696		return;
2697
2698	case TCPS_LAST_ACK:
2699		db_printf("TCPS_LAST_ACK");
2700		return;
2701
2702	case TCPS_FIN_WAIT_2:
2703		db_printf("TCPS_FIN_WAIT_2");
2704		return;
2705
2706	case TCPS_TIME_WAIT:
2707		db_printf("TCPS_TIME_WAIT");
2708		return;
2709
2710	default:
2711		db_printf("unknown");
2712		return;
2713	}
2714}
2715
2716static void
2717db_print_tflags(u_int t_flags)
2718{
2719	int comma;
2720
2721	comma = 0;
2722	if (t_flags & TF_ACKNOW) {
2723		db_printf("%sTF_ACKNOW", comma ? ", " : "");
2724		comma = 1;
2725	}
2726	if (t_flags & TF_DELACK) {
2727		db_printf("%sTF_DELACK", comma ? ", " : "");
2728		comma = 1;
2729	}
2730	if (t_flags & TF_NODELAY) {
2731		db_printf("%sTF_NODELAY", comma ? ", " : "");
2732		comma = 1;
2733	}
2734	if (t_flags & TF_NOOPT) {
2735		db_printf("%sTF_NOOPT", comma ? ", " : "");
2736		comma = 1;
2737	}
2738	if (t_flags & TF_SENTFIN) {
2739		db_printf("%sTF_SENTFIN", comma ? ", " : "");
2740		comma = 1;
2741	}
2742	if (t_flags & TF_REQ_SCALE) {
2743		db_printf("%sTF_REQ_SCALE", comma ? ", " : "");
2744		comma = 1;
2745	}
2746	if (t_flags & TF_RCVD_SCALE) {
2747		db_printf("%sTF_RCVD_SCALE", comma ? ", " : "");
2748		comma = 1;
2749	}
2750	if (t_flags & TF_REQ_TSTMP) {
2751		db_printf("%sTF_REQ_TSTMP", comma ? ", " : "");
2752		comma = 1;
2753	}
2754	if (t_flags & TF_RCVD_TSTMP) {
2755		db_printf("%sTF_RCVD_TSTMP", comma ? ", " : "");
2756		comma = 1;
2757	}
2758	if (t_flags & TF_SACK_PERMIT) {
2759		db_printf("%sTF_SACK_PERMIT", comma ? ", " : "");
2760		comma = 1;
2761	}
2762	if (t_flags & TF_NEEDSYN) {
2763		db_printf("%sTF_NEEDSYN", comma ? ", " : "");
2764		comma = 1;
2765	}
2766	if (t_flags & TF_NEEDFIN) {
2767		db_printf("%sTF_NEEDFIN", comma ? ", " : "");
2768		comma = 1;
2769	}
2770	if (t_flags & TF_NOPUSH) {
2771		db_printf("%sTF_NOPUSH", comma ? ", " : "");
2772		comma = 1;
2773	}
2774	if (t_flags & TF_MORETOCOME) {
2775		db_printf("%sTF_MORETOCOME", comma ? ", " : "");
2776		comma = 1;
2777	}
2778	if (t_flags & TF_LQ_OVERFLOW) {
2779		db_printf("%sTF_LQ_OVERFLOW", comma ? ", " : "");
2780		comma = 1;
2781	}
2782	if (t_flags & TF_LASTIDLE) {
2783		db_printf("%sTF_LASTIDLE", comma ? ", " : "");
2784		comma = 1;
2785	}
2786	if (t_flags & TF_RXWIN0SENT) {
2787		db_printf("%sTF_RXWIN0SENT", comma ? ", " : "");
2788		comma = 1;
2789	}
2790	if (t_flags & TF_FASTRECOVERY) {
2791		db_printf("%sTF_FASTRECOVERY", comma ? ", " : "");
2792		comma = 1;
2793	}
2794	if (t_flags & TF_CONGRECOVERY) {
2795		db_printf("%sTF_CONGRECOVERY", comma ? ", " : "");
2796		comma = 1;
2797	}
2798	if (t_flags & TF_WASFRECOVERY) {
2799		db_printf("%sTF_WASFRECOVERY", comma ? ", " : "");
2800		comma = 1;
2801	}
2802	if (t_flags & TF_SIGNATURE) {
2803		db_printf("%sTF_SIGNATURE", comma ? ", " : "");
2804		comma = 1;
2805	}
2806	if (t_flags & TF_FORCEDATA) {
2807		db_printf("%sTF_FORCEDATA", comma ? ", " : "");
2808		comma = 1;
2809	}
2810	if (t_flags & TF_TSO) {
2811		db_printf("%sTF_TSO", comma ? ", " : "");
2812		comma = 1;
2813	}
2814	if (t_flags & TF_FASTOPEN) {
2815		db_printf("%sTF_FASTOPEN", comma ? ", " : "");
2816		comma = 1;
2817	}
2818}
2819
2820static void
2821db_print_tflags2(u_int t_flags2)
2822{
2823	int comma;
2824
2825	comma = 0;
2826	if (t_flags2 & TF2_ECN_PERMIT) {
2827		db_printf("%sTF2_ECN_PERMIT", comma ? ", " : "");
2828		comma = 1;
2829	}
2830}
2831
2832static void
2833db_print_toobflags(char t_oobflags)
2834{
2835	int comma;
2836
2837	comma = 0;
2838	if (t_oobflags & TCPOOB_HAVEDATA) {
2839		db_printf("%sTCPOOB_HAVEDATA", comma ? ", " : "");
2840		comma = 1;
2841	}
2842	if (t_oobflags & TCPOOB_HADDATA) {
2843		db_printf("%sTCPOOB_HADDATA", comma ? ", " : "");
2844		comma = 1;
2845	}
2846}
2847
2848static void
2849db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
2850{
2851
2852	db_print_indent(indent);
2853	db_printf("%s at %p\n", name, tp);
2854
2855	indent += 2;
2856
2857	db_print_indent(indent);
2858	db_printf("t_segq first: %p   t_segqlen: %d   t_dupacks: %d\n",
2859	   TAILQ_FIRST(&tp->t_segq), tp->t_segqlen, tp->t_dupacks);
2860
2861	db_print_indent(indent);
2862	db_printf("tt_rexmt: %p   tt_persist: %p   tt_keep: %p\n",
2863	    &tp->t_timers->tt_rexmt, &tp->t_timers->tt_persist, &tp->t_timers->tt_keep);
2864
2865	db_print_indent(indent);
2866	db_printf("tt_2msl: %p   tt_delack: %p   t_inpcb: %p\n", &tp->t_timers->tt_2msl,
2867	    &tp->t_timers->tt_delack, tp->t_inpcb);
2868
2869	db_print_indent(indent);
2870	db_printf("t_state: %d (", tp->t_state);
2871	db_print_tstate(tp->t_state);
2872	db_printf(")\n");
2873
2874	db_print_indent(indent);
2875	db_printf("t_flags: 0x%x (", tp->t_flags);
2876	db_print_tflags(tp->t_flags);
2877	db_printf(")\n");
2878
2879	db_print_indent(indent);
2880	db_printf("t_flags2: 0x%x (", tp->t_flags2);
2881	db_print_tflags2(tp->t_flags2);
2882	db_printf(")\n");
2883
2884	db_print_indent(indent);
2885	db_printf("snd_una: 0x%08x   snd_max: 0x%08x   snd_nxt: 0x%08x\n",
2886	    tp->snd_una, tp->snd_max, tp->snd_nxt);
2887
2888	db_print_indent(indent);
2889	db_printf("snd_up: 0x%08x   snd_wl1: 0x%08x   snd_wl2: 0x%08x\n",
2890	   tp->snd_up, tp->snd_wl1, tp->snd_wl2);
2891
2892	db_print_indent(indent);
2893	db_printf("iss: 0x%08x   irs: 0x%08x   rcv_nxt: 0x%08x\n",
2894	    tp->iss, tp->irs, tp->rcv_nxt);
2895
2896	db_print_indent(indent);
2897	db_printf("rcv_adv: 0x%08x   rcv_wnd: %u   rcv_up: 0x%08x\n",
2898	    tp->rcv_adv, tp->rcv_wnd, tp->rcv_up);
2899
2900	db_print_indent(indent);
2901	db_printf("snd_wnd: %u   snd_cwnd: %u\n",
2902	   tp->snd_wnd, tp->snd_cwnd);
2903
2904	db_print_indent(indent);
2905	db_printf("snd_ssthresh: %u   snd_recover: "
2906	    "0x%08x\n", tp->snd_ssthresh, tp->snd_recover);
2907
2908	db_print_indent(indent);
2909	db_printf("t_rcvtime: %u   t_starttime: %u\n",
2910	    tp->t_rcvtime, tp->t_starttime);
2911
2912	db_print_indent(indent);
2913	db_printf("t_rtttime: %u   t_rtseq: 0x%08x\n",
2914	    tp->t_rtttime, tp->t_rtseq);
2915
2916	db_print_indent(indent);
2917	db_printf("t_rxtcur: %d   t_maxseg: %u   t_srtt: %d\n",
2918	    tp->t_rxtcur, tp->t_maxseg, tp->t_srtt);
2919
2920	db_print_indent(indent);
2921	db_printf("t_rttvar: %d   t_rxtshift: %d   t_rttmin: %u   "
2922	    "t_rttbest: %u\n", tp->t_rttvar, tp->t_rxtshift, tp->t_rttmin,
2923	    tp->t_rttbest);
2924
2925	db_print_indent(indent);
2926	db_printf("t_rttupdated: %lu   max_sndwnd: %u   t_softerror: %d\n",
2927	    tp->t_rttupdated, tp->max_sndwnd, tp->t_softerror);
2928
2929	db_print_indent(indent);
2930	db_printf("t_oobflags: 0x%x (", tp->t_oobflags);
2931	db_print_toobflags(tp->t_oobflags);
2932	db_printf(")   t_iobc: 0x%02x\n", tp->t_iobc);
2933
2934	db_print_indent(indent);
2935	db_printf("snd_scale: %u   rcv_scale: %u   request_r_scale: %u\n",
2936	    tp->snd_scale, tp->rcv_scale, tp->request_r_scale);
2937
2938	db_print_indent(indent);
2939	db_printf("ts_recent: %u   ts_recent_age: %u\n",
2940	    tp->ts_recent, tp->ts_recent_age);
2941
2942	db_print_indent(indent);
2943	db_printf("ts_offset: %u   last_ack_sent: 0x%08x   snd_cwnd_prev: "
2944	    "%u\n", tp->ts_offset, tp->last_ack_sent, tp->snd_cwnd_prev);
2945
2946	db_print_indent(indent);
2947	db_printf("snd_ssthresh_prev: %u   snd_recover_prev: 0x%08x   "
2948	    "t_badrxtwin: %u\n", tp->snd_ssthresh_prev,
2949	    tp->snd_recover_prev, tp->t_badrxtwin);
2950
2951	db_print_indent(indent);
2952	db_printf("snd_numholes: %d  snd_holes first: %p\n",
2953	    tp->snd_numholes, TAILQ_FIRST(&tp->snd_holes));
2954
2955	db_print_indent(indent);
2956	db_printf("snd_fack: 0x%08x   rcv_numsacks: %d\n",
2957	    tp->snd_fack, tp->rcv_numsacks);
2958
2959	/* Skip sackblks, sackhint. */
2960
2961	db_print_indent(indent);
2962	db_printf("t_rttlow: %d   rfbuf_ts: %u   rfbuf_cnt: %d\n",
2963	    tp->t_rttlow, tp->rfbuf_ts, tp->rfbuf_cnt);
2964}
2965
2966DB_SHOW_COMMAND(tcpcb, db_show_tcpcb)
2967{
2968	struct tcpcb *tp;
2969
2970	if (!have_addr) {
2971		db_printf("usage: show tcpcb <addr>\n");
2972		return;
2973	}
2974	tp = (struct tcpcb *)addr;
2975
2976	db_print_tcpcb(tp, "tcpcb", 0);
2977}
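
/*
 * Example invocation of the command defined above from the DDB prompt; the
 * address is a placeholder for a tcpcb pointer obtained elsewhere (for
 * instance while inspecting an inpcb).  It dumps the fields formatted by
 * db_print_tcpcb():
 *
 *	db> show tcpcb 0xfffff80012345678
 */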
2978#endif
2979