1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993, 1995
3 *	The Regents of the University of California.
4 * Copyright (c) 2007-2009 Robert N. M. Watson
5 * Copyright (c) 2010-2011 Juniper Networks, Inc.
6 * All rights reserved.
7 *
8 * Portions of this software were developed by Robert N. M. Watson under
9 * contract to Juniper Networks, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 *    may be used to endorse or promote products derived from this software
21 *    without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 *	@(#)in_pcb.c	8.4 (Berkeley) 5/24/95
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD$");
40
41#include "opt_ddb.h"
42#include "opt_ipsec.h"
43#include "opt_inet.h"
44#include "opt_inet6.h"
45#include "opt_pcbgroup.h"
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/malloc.h>
50#include <sys/mbuf.h>
51#include <sys/callout.h>
52#include <sys/domain.h>
53#include <sys/protosw.h>
54#include <sys/socket.h>
55#include <sys/socketvar.h>
56#include <sys/priv.h>
57#include <sys/proc.h>
58#include <sys/refcount.h>
59#include <sys/jail.h>
60#include <sys/kernel.h>
61#include <sys/sysctl.h>
62
63#ifdef DDB
64#include <ddb/ddb.h>
65#endif
66
67#include <vm/uma.h>
68
69#include <net/if.h>
70#include <net/if_types.h>
71#include <net/route.h>
72#include <net/vnet.h>
73
74#if defined(INET) || defined(INET6)
75#include <netinet/in.h>
76#include <netinet/in_pcb.h>
77#include <netinet/ip_var.h>
78#include <netinet/tcp_var.h>
79#include <netinet/udp.h>
80#include <netinet/udp_var.h>
81#endif
82#ifdef INET
83#include <netinet/in_var.h>
84#endif
85#ifdef INET6
86#include <netinet/ip6.h>
87#include <netinet6/in6_pcb.h>
88#include <netinet6/in6_var.h>
89#include <netinet6/ip6_var.h>
90#endif /* INET6 */
91
92
93#ifdef IPSEC
94#include <netipsec/ipsec.h>
95#include <netipsec/key.h>
96#endif /* IPSEC */
97
98#include <security/mac/mac_framework.h>
99
100static struct callout	ipport_tick_callout;
101
102/*
103 * These configure the range of local port addresses assigned to
104 * "unspecified" outgoing connections/packets/whatever.
105 */
106VNET_DEFINE(int, ipport_lowfirstauto) = IPPORT_RESERVED - 1;	/* 1023 */
107VNET_DEFINE(int, ipport_lowlastauto) = IPPORT_RESERVEDSTART;	/* 600 */
108VNET_DEFINE(int, ipport_firstauto) = IPPORT_EPHEMERALFIRST;	/* 10000 */
109VNET_DEFINE(int, ipport_lastauto) = IPPORT_EPHEMERALLAST;	/* 65535 */
110VNET_DEFINE(int, ipport_hifirstauto) = IPPORT_HIFIRSTAUTO;	/* 49152 */
111VNET_DEFINE(int, ipport_hilastauto) = IPPORT_HILASTAUTO;	/* 65535 */
112
113/*
114 * Reserved ports accessible only to root. There are significant
115 * security considerations that must be accounted for when changing these,
116 * but the security benefits can be great. Please be careful.
117 */
118VNET_DEFINE(int, ipport_reservedhigh) = IPPORT_RESERVED - 1;	/* 1023 */
119VNET_DEFINE(int, ipport_reservedlow);
120
121/* Variables dealing with random ephemeral port allocation. */
122VNET_DEFINE(int, ipport_randomized) = 1;	/* user controlled via sysctl */
123VNET_DEFINE(int, ipport_randomcps) = 10;	/* user controlled via sysctl */
124VNET_DEFINE(int, ipport_randomtime) = 45;	/* user controlled via sysctl */
125VNET_DEFINE(int, ipport_stoprandom);		/* toggled by ipport_tick */
126VNET_DEFINE(int, ipport_tcpallocs);
127static VNET_DEFINE(int, ipport_tcplastcount);
128
129#define	V_ipport_tcplastcount		VNET(ipport_tcplastcount)
130
131static void	in_pcbremlists(struct inpcb *inp);
132#ifdef INET
133static struct inpcb	*in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo,
134			    struct in_addr faddr, u_int fport_arg,
135			    struct in_addr laddr, u_int lport_arg,
136			    int lookupflags, struct ifnet *ifp);
137
138#define RANGECHK(var, min, max) \
139	if ((var) < (min)) { (var) = (min); } \
140	else if ((var) > (max)) { (var) = (max); }
141
142static int
143sysctl_net_ipport_check(SYSCTL_HANDLER_ARGS)
144{
145	int error;
146
147#ifdef VIMAGE
148	error = vnet_sysctl_handle_int(oidp, arg1, arg2, req);
149#else
150	error = sysctl_handle_int(oidp, arg1, arg2, req);
151#endif
152	if (error == 0) {
153		RANGECHK(V_ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
154		RANGECHK(V_ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
155		RANGECHK(V_ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX);
156		RANGECHK(V_ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX);
157		RANGECHK(V_ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX);
158		RANGECHK(V_ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX);
159	}
160	return (error);
161}
162
163#undef RANGECHK
164
165static SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW, 0,
166    "IP Ports");
167
168SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
169	CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowfirstauto), 0,
170	&sysctl_net_ipport_check, "I", "");
171SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
172	CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowlastauto), 0,
173	&sysctl_net_ipport_check, "I", "");
174SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, first,
175	CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_firstauto), 0,
176	&sysctl_net_ipport_check, "I", "");
177SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, last,
178	CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lastauto), 0,
179	&sysctl_net_ipport_check, "I", "");
180SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
181	CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hifirstauto), 0,
182	&sysctl_net_ipport_check, "I", "");
183SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
184	CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hilastauto), 0,
185	&sysctl_net_ipport_check, "I", "");
186SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedhigh,
187	CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedhigh), 0, "");
188SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedlow,
189	CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedlow), 0, "");
190SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomized, CTLFLAG_RW,
191	&VNET_NAME(ipport_randomized), 0, "Enable random port allocation");
192SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomcps, CTLFLAG_RW,
193	&VNET_NAME(ipport_randomcps), 0, "Maximum number of random port "
194	"allocations before switching to a sequental one");
195SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomtime, CTLFLAG_RW,
196	&VNET_NAME(ipport_randomtime), 0,
197	"Minimum time to keep sequental port "
198	"allocation before switching to a random one");
199#endif /* INET */
200
201/*
202 * in_pcb.c: manage the Protocol Control Blocks.
203 *
204 * NOTE: It is assumed that most of these functions will be called with
205 * the pcbinfo lock held, and often, the inpcb lock held, as these utility
206 * functions often modify hash chains or addresses in pcbs.
207 */
208
209/*
210 * Initialize an inpcbinfo -- we should be able to reduce the number of
211 * arguments in time.
212 */
213void
214in_pcbinfo_init(struct inpcbinfo *pcbinfo, const char *name,
215    struct inpcbhead *listhead, int hash_nelements, int porthash_nelements,
216    char *inpcbzone_name, uma_init inpcbzone_init, uma_fini inpcbzone_fini,
217    uint32_t inpcbzone_flags, u_int hashfields)
218{
219
220	INP_INFO_LOCK_INIT(pcbinfo, name);
221	INP_HASH_LOCK_INIT(pcbinfo, "pcbinfohash");	/* XXXRW: argument? */
222#ifdef VIMAGE
223	pcbinfo->ipi_vnet = curvnet;
224#endif
225	pcbinfo->ipi_listhead = listhead;
226	LIST_INIT(pcbinfo->ipi_listhead);
227	pcbinfo->ipi_count = 0;
228	pcbinfo->ipi_hashbase = hashinit(hash_nelements, M_PCB,
229	    &pcbinfo->ipi_hashmask);
230	pcbinfo->ipi_porthashbase = hashinit(porthash_nelements, M_PCB,
231	    &pcbinfo->ipi_porthashmask);
232#ifdef PCBGROUP
233	in_pcbgroup_init(pcbinfo, hashfields, hash_nelements);
234#endif
235	pcbinfo->ipi_zone = uma_zcreate(inpcbzone_name, sizeof(struct inpcb),
236	    NULL, NULL, inpcbzone_init, inpcbzone_fini, UMA_ALIGN_PTR,
237	    inpcbzone_flags);
238	uma_zone_set_max(pcbinfo->ipi_zone, maxsockets);
239	uma_zone_set_warning(pcbinfo->ipi_zone,
240	    "kern.ipc.maxsockets limit reached");
241}
242
243/*
244 * Destroy an inpcbinfo.
245 */
246void
247in_pcbinfo_destroy(struct inpcbinfo *pcbinfo)
248{
249
250	KASSERT(pcbinfo->ipi_count == 0,
251	    ("%s: ipi_count = %u", __func__, pcbinfo->ipi_count));
252
253	hashdestroy(pcbinfo->ipi_hashbase, M_PCB, pcbinfo->ipi_hashmask);
254	hashdestroy(pcbinfo->ipi_porthashbase, M_PCB,
255	    pcbinfo->ipi_porthashmask);
256#ifdef PCBGROUP
257	in_pcbgroup_destroy(pcbinfo);
258#endif
259	uma_zdestroy(pcbinfo->ipi_zone);
260	INP_HASH_LOCK_DESTROY(pcbinfo);
261	INP_INFO_LOCK_DESTROY(pcbinfo);
262}
263
264/*
265 * Allocate a PCB and associate it with the socket.
266 * On success return with the PCB locked.
267 */
268int
269in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
270{
271	struct inpcb *inp;
272	int error;
273
274	INP_INFO_WLOCK_ASSERT(pcbinfo);
275	error = 0;
276	inp = uma_zalloc(pcbinfo->ipi_zone, M_NOWAIT);
277	if (inp == NULL)
278		return (ENOBUFS);
279	bzero(inp, inp_zero_size);
280	inp->inp_pcbinfo = pcbinfo;
281	inp->inp_socket = so;
282	inp->inp_cred = crhold(so->so_cred);
283	inp->inp_inc.inc_fibnum = so->so_fibnum;
284#ifdef MAC
285	error = mac_inpcb_init(inp, M_NOWAIT);
286	if (error != 0)
287		goto out;
288	mac_inpcb_create(so, inp);
289#endif
290#ifdef IPSEC
291	error = ipsec_init_policy(so, &inp->inp_sp);
292	if (error != 0) {
293#ifdef MAC
294		mac_inpcb_destroy(inp);
295#endif
296		goto out;
297	}
298#endif /*IPSEC*/
299#ifdef INET6
300	if (INP_SOCKAF(so) == AF_INET6) {
301		inp->inp_vflag |= INP_IPV6PROTO;
302		if (V_ip6_v6only)
303			inp->inp_flags |= IN6P_IPV6_V6ONLY;
304	}
305#endif
306	LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
307	pcbinfo->ipi_count++;
308	so->so_pcb = (caddr_t)inp;
309#ifdef INET6
310	if (V_ip6_auto_flowlabel)
311		inp->inp_flags |= IN6P_AUTOFLOWLABEL;
312#endif
313	INP_WLOCK(inp);
314	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
315	refcount_init(&inp->inp_refcount, 1);	/* Reference from inpcbinfo */
316#if defined(IPSEC) || defined(MAC)
317out:
318	if (error != 0) {
319		crfree(inp->inp_cred);
320		uma_zfree(pcbinfo->ipi_zone, inp);
321	}
322#endif
323	return (error);
324}
325
326#ifdef INET
327int
328in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
329{
330	int anonport, error;
331
332	INP_WLOCK_ASSERT(inp);
333	INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
334
335	if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY)
336		return (EINVAL);
337	anonport = nam == NULL || ((struct sockaddr_in *)nam)->sin_port == 0;
338	error = in_pcbbind_setup(inp, nam, &inp->inp_laddr.s_addr,
339	    &inp->inp_lport, cred);
340	if (error)
341		return (error);
342	if (in_pcbinshash(inp) != 0) {
343		inp->inp_laddr.s_addr = INADDR_ANY;
344		inp->inp_lport = 0;
345		return (EAGAIN);
346	}
347	if (anonport)
348		inp->inp_flags |= INP_ANONPORT;
349	return (0);
350}
351#endif
352
353#if defined(INET) || defined(INET6)
354int
355in_pcb_lport(struct inpcb *inp, struct in_addr *laddrp, u_short *lportp,
356    struct ucred *cred, int lookupflags)
357{
358	struct inpcbinfo *pcbinfo;
359	struct inpcb *tmpinp;
360	unsigned short *lastport;
361	int count, dorandom, error;
362	u_short aux, first, last, lport;
363#ifdef INET
364	struct in_addr laddr;
365#endif
366
367	pcbinfo = inp->inp_pcbinfo;
368
369	/*
370	 * Because no actual state changes occur here, a global write lock on
371	 * the pcbinfo isn't required.
372	 */
373	INP_LOCK_ASSERT(inp);
374	INP_HASH_LOCK_ASSERT(pcbinfo);
375
376	if (inp->inp_flags & INP_HIGHPORT) {
377		first = V_ipport_hifirstauto;	/* sysctl */
378		last  = V_ipport_hilastauto;
379		lastport = &pcbinfo->ipi_lasthi;
380	} else if (inp->inp_flags & INP_LOWPORT) {
381		error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
382		if (error)
383			return (error);
384		first = V_ipport_lowfirstauto;	/* 1023 */
385		last  = V_ipport_lowlastauto;	/* 600 */
386		lastport = &pcbinfo->ipi_lastlow;
387	} else {
388		first = V_ipport_firstauto;	/* sysctl */
389		last  = V_ipport_lastauto;
390		lastport = &pcbinfo->ipi_lastport;
391	}
392	/*
393	 * For UDP, use random port allocation as long as the user
394	 * allows it.  For TCP (and as of yet unknown) connections,
395	 * use random port allocation only if the user allows it AND
396	 * ipport_tick() allows it.
397	 */
398	if (V_ipport_randomized &&
399		(!V_ipport_stoprandom || pcbinfo == &V_udbinfo))
400		dorandom = 1;
401	else
402		dorandom = 0;
403	/*
404	 * It makes no sense to do random port allocation if
405	 * we have the only port available.
406	 */
407	if (first == last)
408		dorandom = 0;
409	/* Make sure to not include UDP packets in the count. */
410	if (pcbinfo != &V_udbinfo)
411		V_ipport_tcpallocs++;
412	/*
413	 * Instead of having two loops further down counting up or down
414	 * make sure that first is always <= last and go with only one
415	 * code path implementing all logic.
416	 */
417	if (first > last) {
418		aux = first;
419		first = last;
420		last = aux;
421	}
422
423#ifdef INET
424	/* Make the compiler happy. */
425	laddr.s_addr = 0;
426	if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4) {
427		KASSERT(laddrp != NULL, ("%s: laddrp NULL for v4 inp %p",
428		    __func__, inp));
429		laddr = *laddrp;
430	}
431#endif
432	tmpinp = NULL;	/* Make compiler happy. */
433	lport = *lportp;
434
435	if (dorandom)
436		*lastport = first + (arc4random() % (last - first));
437
438	count = last - first;
439
440	do {
441		if (count-- < 0)	/* completely used? */
442			return (EADDRNOTAVAIL);
443		++*lastport;
444		if (*lastport < first || *lastport > last)
445			*lastport = first;
446		lport = htons(*lastport);
447
448#ifdef INET6
449		if ((inp->inp_vflag & INP_IPV6) != 0)
450			tmpinp = in6_pcblookup_local(pcbinfo,
451			    &inp->in6p_laddr, lport, lookupflags, cred);
452#endif
453#if defined(INET) && defined(INET6)
454		else
455#endif
456#ifdef INET
457			tmpinp = in_pcblookup_local(pcbinfo, laddr,
458			    lport, lookupflags, cred);
459#endif
460	} while (tmpinp != NULL);
461
462#ifdef INET
463	if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4)
464		laddrp->s_addr = laddr.s_addr;
465#endif
466	*lportp = lport;
467
468	return (0);
469}
470
471/*
472 * Return cached socket options.
473 */
474short
475inp_so_options(const struct inpcb *inp)
476{
477   short so_options;
478
479   so_options = 0;
480
481   if ((inp->inp_flags2 & INP_REUSEPORT) != 0)
482	   so_options |= SO_REUSEPORT;
483   if ((inp->inp_flags2 & INP_REUSEADDR) != 0)
484	   so_options |= SO_REUSEADDR;
485   return (so_options);
486}
487#endif /* INET || INET6 */
488
489#ifdef INET
490/*
491 * Set up a bind operation on a PCB, performing port allocation
492 * as required, but do not actually modify the PCB. Callers can
493 * either complete the bind by setting inp_laddr/inp_lport and
494 * calling in_pcbinshash(), or they can just use the resulting
495 * port and address to authorise the sending of a once-off packet.
496 *
497 * On error, the values of *laddrp and *lportp are not changed.
498 */
499int
500in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
501    u_short *lportp, struct ucred *cred)
502{
503	struct socket *so = inp->inp_socket;
504	struct sockaddr_in *sin;
505	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
506	struct in_addr laddr;
507	u_short lport = 0;
508	int lookupflags = 0, reuseport = (so->so_options & SO_REUSEPORT);
509	int error;
510
511	/*
512	 * No state changes, so read locks are sufficient here.
513	 */
514	INP_LOCK_ASSERT(inp);
515	INP_HASH_LOCK_ASSERT(pcbinfo);
516
517	if (TAILQ_EMPTY(&V_in_ifaddrhead)) /* XXX broken! */
518		return (EADDRNOTAVAIL);
519	laddr.s_addr = *laddrp;
520	if (nam != NULL && laddr.s_addr != INADDR_ANY)
521		return (EINVAL);
522	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
523		lookupflags = INPLOOKUP_WILDCARD;
524	if (nam == NULL) {
525		if ((error = prison_local_ip4(cred, &laddr)) != 0)
526			return (error);
527	} else {
528		sin = (struct sockaddr_in *)nam;
529		if (nam->sa_len != sizeof (*sin))
530			return (EINVAL);
531#ifdef notdef
532		/*
533		 * We should check the family, but old programs
534		 * incorrectly fail to initialize it.
535		 */
536		if (sin->sin_family != AF_INET)
537			return (EAFNOSUPPORT);
538#endif
539		error = prison_local_ip4(cred, &sin->sin_addr);
540		if (error)
541			return (error);
542		if (sin->sin_port != *lportp) {
543			/* Don't allow the port to change. */
544			if (*lportp != 0)
545				return (EINVAL);
546			lport = sin->sin_port;
547		}
548		/* NB: lport is left as 0 if the port isn't being changed. */
549		if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
550			/*
551			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
552			 * allow complete duplication of binding if
553			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
554			 * and a multicast address is bound on both
555			 * new and duplicated sockets.
556			 */
557			if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) != 0)
558				reuseport = SO_REUSEADDR|SO_REUSEPORT;
559		} else if (sin->sin_addr.s_addr != INADDR_ANY) {
560			sin->sin_port = 0;		/* yech... */
561			bzero(&sin->sin_zero, sizeof(sin->sin_zero));
562			/*
563			 * Is the address a local IP address?
564			 * If INP_BINDANY is set, then the socket may be bound
565			 * to any endpoint address, local or not.
566			 */
567			if ((inp->inp_flags & INP_BINDANY) == 0 &&
568			    ifa_ifwithaddr_check((struct sockaddr *)sin) == 0)
569				return (EADDRNOTAVAIL);
570		}
571		laddr = sin->sin_addr;
572		if (lport) {
573			struct inpcb *t;
574			struct tcptw *tw;
575
576			/* GROSS */
577			if (ntohs(lport) <= V_ipport_reservedhigh &&
578			    ntohs(lport) >= V_ipport_reservedlow &&
579			    priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT,
580			    0))
581				return (EACCES);
582			if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) &&
583			    priv_check_cred(inp->inp_cred,
584			    PRIV_NETINET_REUSEPORT, 0) != 0) {
585				t = in_pcblookup_local(pcbinfo, sin->sin_addr,
586				    lport, INPLOOKUP_WILDCARD, cred);
587	/*
588	 * XXX
589	 * This entire block sorely needs a rewrite.
590	 */
591				if (t &&
592				    ((t->inp_flags & INP_TIMEWAIT) == 0) &&
593				    (so->so_type != SOCK_STREAM ||
594				     ntohl(t->inp_faddr.s_addr) == INADDR_ANY) &&
595				    (ntohl(sin->sin_addr.s_addr) != INADDR_ANY ||
596				     ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
597				     (t->inp_flags2 & INP_REUSEPORT) == 0) &&
598				    (inp->inp_cred->cr_uid !=
599				     t->inp_cred->cr_uid))
600					return (EADDRINUSE);
601			}
602			t = in_pcblookup_local(pcbinfo, sin->sin_addr,
603			    lport, lookupflags, cred);
604			if (t && (t->inp_flags & INP_TIMEWAIT)) {
605				/*
606				 * XXXRW: If an incpb has had its timewait
607				 * state recycled, we treat the address as
608				 * being in use (for now).  This is better
609				 * than a panic, but not desirable.
610				 */
611				tw = intotw(t);
612				if (tw == NULL ||
613				    (reuseport & tw->tw_so_options) == 0)
614					return (EADDRINUSE);
615			} else if (t && (reuseport & inp_so_options(t)) == 0) {
616#ifdef INET6
617				if (ntohl(sin->sin_addr.s_addr) !=
618				    INADDR_ANY ||
619				    ntohl(t->inp_laddr.s_addr) !=
620				    INADDR_ANY ||
621				    (inp->inp_vflag & INP_IPV6PROTO) == 0 ||
622				    (t->inp_vflag & INP_IPV6PROTO) == 0)
623#endif
624				return (EADDRINUSE);
625			}
626		}
627	}
628	if (*lportp != 0)
629		lport = *lportp;
630	if (lport == 0) {
631		error = in_pcb_lport(inp, &laddr, &lport, cred, lookupflags);
632		if (error != 0)
633			return (error);
634
635	}
636	*laddrp = laddr.s_addr;
637	*lportp = lport;
638	return (0);
639}
640
641/*
642 * Connect from a socket to a specified address.
643 * Both address and port must be specified in argument sin.
644 * If don't have a local address for this socket yet,
645 * then pick one.
646 */
647int
648in_pcbconnect_mbuf(struct inpcb *inp, struct sockaddr *nam,
649    struct ucred *cred, struct mbuf *m)
650{
651	u_short lport, fport;
652	in_addr_t laddr, faddr;
653	int anonport, error;
654
655	INP_WLOCK_ASSERT(inp);
656	INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
657
658	lport = inp->inp_lport;
659	laddr = inp->inp_laddr.s_addr;
660	anonport = (lport == 0);
661	error = in_pcbconnect_setup(inp, nam, &laddr, &lport, &faddr, &fport,
662	    NULL, cred);
663	if (error)
664		return (error);
665
666	/* Do the initial binding of the local address if required. */
667	if (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0) {
668		inp->inp_lport = lport;
669		inp->inp_laddr.s_addr = laddr;
670		if (in_pcbinshash(inp) != 0) {
671			inp->inp_laddr.s_addr = INADDR_ANY;
672			inp->inp_lport = 0;
673			return (EAGAIN);
674		}
675	}
676
677	/* Commit the remaining changes. */
678	inp->inp_lport = lport;
679	inp->inp_laddr.s_addr = laddr;
680	inp->inp_faddr.s_addr = faddr;
681	inp->inp_fport = fport;
682	in_pcbrehash_mbuf(inp, m);
683
684	if (anonport)
685		inp->inp_flags |= INP_ANONPORT;
686	return (0);
687}
688
689int
690in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
691{
692
693	return (in_pcbconnect_mbuf(inp, nam, cred, NULL));
694}
695
696/*
697 * Do proper source address selection on an unbound socket in case
698 * of connect. Take jails into account as well.
699 */
700static int
701in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_addr *laddr,
702    struct ucred *cred)
703{
704	struct ifaddr *ifa;
705	struct sockaddr *sa;
706	struct sockaddr_in *sin;
707	struct route sro;
708	int error;
709
710	KASSERT(laddr != NULL, ("%s: laddr NULL", __func__));
711
712	/*
713	 * Bypass source address selection and use the primary jail IP
714	 * if requested.
715	 */
716	if (cred != NULL && !prison_saddrsel_ip4(cred, laddr))
717		return (0);
718
719	error = 0;
720	bzero(&sro, sizeof(sro));
721
722	sin = (struct sockaddr_in *)&sro.ro_dst;
723	sin->sin_family = AF_INET;
724	sin->sin_len = sizeof(struct sockaddr_in);
725	sin->sin_addr.s_addr = faddr->s_addr;
726
727	/*
728	 * If route is known our src addr is taken from the i/f,
729	 * else punt.
730	 *
731	 * Find out route to destination.
732	 */
733	if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0)
734		in_rtalloc_ign(&sro, 0, inp->inp_inc.inc_fibnum);
735
736	/*
737	 * If we found a route, use the address corresponding to
738	 * the outgoing interface.
739	 *
740	 * Otherwise assume faddr is reachable on a directly connected
741	 * network and try to find a corresponding interface to take
742	 * the source address from.
743	 */
744	if (sro.ro_rt == NULL || sro.ro_rt->rt_ifp == NULL) {
745		struct in_ifaddr *ia;
746		struct ifnet *ifp;
747
748		ia = ifatoia(ifa_ifwithdstaddr((struct sockaddr *)sin));
749		if (ia == NULL)
750			ia = ifatoia(ifa_ifwithnet((struct sockaddr *)sin, 0));
751		if (ia == NULL) {
752			error = ENETUNREACH;
753			goto done;
754		}
755
756		if (cred == NULL || !prison_flag(cred, PR_IP4)) {
757			laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
758			ifa_free(&ia->ia_ifa);
759			goto done;
760		}
761
762		ifp = ia->ia_ifp;
763		ifa_free(&ia->ia_ifa);
764		ia = NULL;
765		IF_ADDR_RLOCK(ifp);
766		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
767
768			sa = ifa->ifa_addr;
769			if (sa->sa_family != AF_INET)
770				continue;
771			sin = (struct sockaddr_in *)sa;
772			if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
773				ia = (struct in_ifaddr *)ifa;
774				break;
775			}
776		}
777		if (ia != NULL) {
778			laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
779			IF_ADDR_RUNLOCK(ifp);
780			goto done;
781		}
782		IF_ADDR_RUNLOCK(ifp);
783
784		/* 3. As a last resort return the 'default' jail address. */
785		error = prison_get_ip4(cred, laddr);
786		goto done;
787	}
788
789	/*
790	 * If the outgoing interface on the route found is not
791	 * a loopback interface, use the address from that interface.
792	 * In case of jails do those three steps:
793	 * 1. check if the interface address belongs to the jail. If so use it.
794	 * 2. check if we have any address on the outgoing interface
795	 *    belonging to this jail. If so use it.
796	 * 3. as a last resort return the 'default' jail address.
797	 */
798	if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) {
799		struct in_ifaddr *ia;
800		struct ifnet *ifp;
801
802		/* If not jailed, use the default returned. */
803		if (cred == NULL || !prison_flag(cred, PR_IP4)) {
804			ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
805			laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
806			goto done;
807		}
808
809		/* Jailed. */
810		/* 1. Check if the iface address belongs to the jail. */
811		sin = (struct sockaddr_in *)sro.ro_rt->rt_ifa->ifa_addr;
812		if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
813			ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
814			laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
815			goto done;
816		}
817
818		/*
819		 * 2. Check if we have any address on the outgoing interface
820		 *    belonging to this jail.
821		 */
822		ia = NULL;
823		ifp = sro.ro_rt->rt_ifp;
824		IF_ADDR_RLOCK(ifp);
825		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
826			sa = ifa->ifa_addr;
827			if (sa->sa_family != AF_INET)
828				continue;
829			sin = (struct sockaddr_in *)sa;
830			if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
831				ia = (struct in_ifaddr *)ifa;
832				break;
833			}
834		}
835		if (ia != NULL) {
836			laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
837			IF_ADDR_RUNLOCK(ifp);
838			goto done;
839		}
840		IF_ADDR_RUNLOCK(ifp);
841
842		/* 3. As a last resort return the 'default' jail address. */
843		error = prison_get_ip4(cred, laddr);
844		goto done;
845	}
846
847	/*
848	 * The outgoing interface is marked with 'loopback net', so a route
849	 * to ourselves is here.
850	 * Try to find the interface of the destination address and then
851	 * take the address from there. That interface is not necessarily
852	 * a loopback interface.
853	 * In case of jails, check that it is an address of the jail
854	 * and if we cannot find, fall back to the 'default' jail address.
855	 */
856	if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) != 0) {
857		struct sockaddr_in sain;
858		struct in_ifaddr *ia;
859
860		bzero(&sain, sizeof(struct sockaddr_in));
861		sain.sin_family = AF_INET;
862		sain.sin_len = sizeof(struct sockaddr_in);
863		sain.sin_addr.s_addr = faddr->s_addr;
864
865		ia = ifatoia(ifa_ifwithdstaddr(sintosa(&sain)));
866		if (ia == NULL)
867			ia = ifatoia(ifa_ifwithnet(sintosa(&sain), 0));
868		if (ia == NULL)
869			ia = ifatoia(ifa_ifwithaddr(sintosa(&sain)));
870
871		if (cred == NULL || !prison_flag(cred, PR_IP4)) {
872			if (ia == NULL) {
873				error = ENETUNREACH;
874				goto done;
875			}
876			laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
877			ifa_free(&ia->ia_ifa);
878			goto done;
879		}
880
881		/* Jailed. */
882		if (ia != NULL) {
883			struct ifnet *ifp;
884
885			ifp = ia->ia_ifp;
886			ifa_free(&ia->ia_ifa);
887			ia = NULL;
888			IF_ADDR_RLOCK(ifp);
889			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
890
891				sa = ifa->ifa_addr;
892				if (sa->sa_family != AF_INET)
893					continue;
894				sin = (struct sockaddr_in *)sa;
895				if (prison_check_ip4(cred,
896				    &sin->sin_addr) == 0) {
897					ia = (struct in_ifaddr *)ifa;
898					break;
899				}
900			}
901			if (ia != NULL) {
902				laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
903				IF_ADDR_RUNLOCK(ifp);
904				goto done;
905			}
906			IF_ADDR_RUNLOCK(ifp);
907		}
908
909		/* 3. As a last resort return the 'default' jail address. */
910		error = prison_get_ip4(cred, laddr);
911		goto done;
912	}
913
914done:
915	if (sro.ro_rt != NULL)
916		RTFREE(sro.ro_rt);
917	return (error);
918}
919
920/*
921 * Set up for a connect from a socket to the specified address.
922 * On entry, *laddrp and *lportp should contain the current local
923 * address and port for the PCB; these are updated to the values
924 * that should be placed in inp_laddr and inp_lport to complete
925 * the connect.
926 *
927 * On success, *faddrp and *fportp will be set to the remote address
928 * and port. These are not updated in the error case.
929 *
930 * If the operation fails because the connection already exists,
931 * *oinpp will be set to the PCB of that connection so that the
932 * caller can decide to override it. In all other cases, *oinpp
933 * is set to NULL.
934 */
935int
936in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam,
937    in_addr_t *laddrp, u_short *lportp, in_addr_t *faddrp, u_short *fportp,
938    struct inpcb **oinpp, struct ucred *cred)
939{
940	struct sockaddr_in *sin = (struct sockaddr_in *)nam;
941	struct in_ifaddr *ia;
942	struct inpcb *oinp;
943	struct in_addr laddr, faddr;
944	u_short lport, fport;
945	int error;
946
947	/*
948	 * Because a global state change doesn't actually occur here, a read
949	 * lock is sufficient.
950	 */
951	INP_LOCK_ASSERT(inp);
952	INP_HASH_LOCK_ASSERT(inp->inp_pcbinfo);
953
954	if (oinpp != NULL)
955		*oinpp = NULL;
956	if (nam->sa_len != sizeof (*sin))
957		return (EINVAL);
958	if (sin->sin_family != AF_INET)
959		return (EAFNOSUPPORT);
960	if (sin->sin_port == 0)
961		return (EADDRNOTAVAIL);
962	laddr.s_addr = *laddrp;
963	lport = *lportp;
964	faddr = sin->sin_addr;
965	fport = sin->sin_port;
966
967	if (!TAILQ_EMPTY(&V_in_ifaddrhead)) {
968		/*
969		 * If the destination address is INADDR_ANY,
970		 * use the primary local address.
971		 * If the supplied address is INADDR_BROADCAST,
972		 * and the primary interface supports broadcast,
973		 * choose the broadcast address for that interface.
974		 */
975		if (faddr.s_addr == INADDR_ANY) {
976			IN_IFADDR_RLOCK();
977			faddr =
978			    IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr;
979			IN_IFADDR_RUNLOCK();
980			if (cred != NULL &&
981			    (error = prison_get_ip4(cred, &faddr)) != 0)
982				return (error);
983		} else if (faddr.s_addr == (u_long)INADDR_BROADCAST) {
984			IN_IFADDR_RLOCK();
985			if (TAILQ_FIRST(&V_in_ifaddrhead)->ia_ifp->if_flags &
986			    IFF_BROADCAST)
987				faddr = satosin(&TAILQ_FIRST(
988				    &V_in_ifaddrhead)->ia_broadaddr)->sin_addr;
989			IN_IFADDR_RUNLOCK();
990		}
991	}
992	if (laddr.s_addr == INADDR_ANY) {
993		error = in_pcbladdr(inp, &faddr, &laddr, cred);
994		/*
995		 * If the destination address is multicast and an outgoing
996		 * interface has been set as a multicast option, prefer the
997		 * address of that interface as our source address.
998		 */
999		if (IN_MULTICAST(ntohl(faddr.s_addr)) &&
1000		    inp->inp_moptions != NULL) {
1001			struct ip_moptions *imo;
1002			struct ifnet *ifp;
1003
1004			imo = inp->inp_moptions;
1005			if (imo->imo_multicast_ifp != NULL) {
1006				ifp = imo->imo_multicast_ifp;
1007				IN_IFADDR_RLOCK();
1008				TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
1009					if ((ia->ia_ifp == ifp) &&
1010					    (cred == NULL ||
1011					    prison_check_ip4(cred,
1012					    &ia->ia_addr.sin_addr) == 0))
1013						break;
1014				}
1015				if (ia == NULL)
1016					error = EADDRNOTAVAIL;
1017				else {
1018					laddr = ia->ia_addr.sin_addr;
1019					error = 0;
1020				}
1021				IN_IFADDR_RUNLOCK();
1022			}
1023		}
1024		if (error)
1025			return (error);
1026	}
1027	oinp = in_pcblookup_hash_locked(inp->inp_pcbinfo, faddr, fport,
1028	    laddr, lport, 0, NULL);
1029	if (oinp != NULL) {
1030		if (oinpp != NULL)
1031			*oinpp = oinp;
1032		return (EADDRINUSE);
1033	}
1034	if (lport == 0) {
1035		error = in_pcbbind_setup(inp, NULL, &laddr.s_addr, &lport,
1036		    cred);
1037		if (error)
1038			return (error);
1039	}
1040	*laddrp = laddr.s_addr;
1041	*lportp = lport;
1042	*faddrp = faddr.s_addr;
1043	*fportp = fport;
1044	return (0);
1045}
1046
1047void
1048in_pcbdisconnect(struct inpcb *inp)
1049{
1050
1051	INP_WLOCK_ASSERT(inp);
1052	INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
1053
1054	inp->inp_faddr.s_addr = INADDR_ANY;
1055	inp->inp_fport = 0;
1056	in_pcbrehash(inp);
1057}
1058#endif /* INET */
1059
1060/*
1061 * in_pcbdetach() is responsibe for disassociating a socket from an inpcb.
1062 * For most protocols, this will be invoked immediately prior to calling
1063 * in_pcbfree().  However, with TCP the inpcb may significantly outlive the
1064 * socket, in which case in_pcbfree() is deferred.
1065 */
1066void
1067in_pcbdetach(struct inpcb *inp)
1068{
1069
1070	KASSERT(inp->inp_socket != NULL, ("%s: inp_socket == NULL", __func__));
1071
1072	inp->inp_socket->so_pcb = NULL;
1073	inp->inp_socket = NULL;
1074}
1075
1076/*
1077 * in_pcbref() bumps the reference count on an inpcb in order to maintain
1078 * stability of an inpcb pointer despite the inpcb lock being released.  This
1079 * is used in TCP when the inpcbinfo lock needs to be acquired or upgraded,
1080 * but where the inpcb lock may already held, or when acquiring a reference
1081 * via a pcbgroup.
1082 *
1083 * in_pcbref() should be used only to provide brief memory stability, and
1084 * must always be followed by a call to INP_WLOCK() and in_pcbrele() to
1085 * garbage collect the inpcb if it has been in_pcbfree()'d from another
1086 * context.  Until in_pcbrele() has returned that the inpcb is still valid,
1087 * lock and rele are the *only* safe operations that may be performed on the
1088 * inpcb.
1089 *
1090 * While the inpcb will not be freed, releasing the inpcb lock means that the
1091 * connection's state may change, so the caller should be careful to
1092 * revalidate any cached state on reacquiring the lock.  Drop the reference
1093 * using in_pcbrele().
1094 */
1095void
1096in_pcbref(struct inpcb *inp)
1097{
1098
1099	KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1100
1101	refcount_acquire(&inp->inp_refcount);
1102}
1103
1104/*
1105 * Drop a refcount on an inpcb elevated using in_pcbref(); because a call to
1106 * in_pcbfree() may have been made between in_pcbref() and in_pcbrele(), we
1107 * return a flag indicating whether or not the inpcb remains valid.  If it is
1108 * valid, we return with the inpcb lock held.
1109 *
1110 * Notice that, unlike in_pcbref(), the inpcb lock must be held to drop a
1111 * reference on an inpcb.  Historically more work was done here (actually, in
1112 * in_pcbfree_internal()) but has been moved to in_pcbfree() to avoid the
1113 * need for the pcbinfo lock in in_pcbrele().  Deferring the free is entirely
1114 * about memory stability (and continued use of the write lock).
1115 */
1116int
1117in_pcbrele_rlocked(struct inpcb *inp)
1118{
1119	struct inpcbinfo *pcbinfo;
1120
1121	KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1122
1123	INP_RLOCK_ASSERT(inp);
1124
1125	if (refcount_release(&inp->inp_refcount) == 0) {
1126		/*
1127		 * If the inpcb has been freed, let the caller know, even if
1128		 * this isn't the last reference.
1129		 */
1130		if (inp->inp_flags2 & INP_FREED) {
1131			INP_RUNLOCK(inp);
1132			return (1);
1133		}
1134		return (0);
1135	}
1136
1137	KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1138
1139	INP_RUNLOCK(inp);
1140	pcbinfo = inp->inp_pcbinfo;
1141	uma_zfree(pcbinfo->ipi_zone, inp);
1142	return (1);
1143}
1144
1145int
1146in_pcbrele_wlocked(struct inpcb *inp)
1147{
1148	struct inpcbinfo *pcbinfo;
1149
1150	KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1151
1152	INP_WLOCK_ASSERT(inp);
1153
1154	if (refcount_release(&inp->inp_refcount) == 0)
1155		return (0);
1156
1157	KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1158
1159	INP_WUNLOCK(inp);
1160	pcbinfo = inp->inp_pcbinfo;
1161	uma_zfree(pcbinfo->ipi_zone, inp);
1162	return (1);
1163}
1164
1165/*
1166 * Temporary wrapper.
1167 */
1168int
1169in_pcbrele(struct inpcb *inp)
1170{
1171
1172	return (in_pcbrele_wlocked(inp));
1173}
1174
1175/*
1176 * Unconditionally schedule an inpcb to be freed by decrementing its
1177 * reference count, which should occur only after the inpcb has been detached
1178 * from its socket.  If another thread holds a temporary reference (acquired
1179 * using in_pcbref()) then the free is deferred until that reference is
1180 * released using in_pcbrele(), but the inpcb is still unlocked.  Almost all
1181 * work, including removal from global lists, is done in this context, where
1182 * the pcbinfo lock is held.
1183 */
1184void
1185in_pcbfree(struct inpcb *inp)
1186{
1187	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
1188
1189	KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1190
1191	INP_INFO_WLOCK_ASSERT(pcbinfo);
1192	INP_WLOCK_ASSERT(inp);
1193
1194	/* XXXRW: Do as much as possible here. */
1195#ifdef IPSEC
1196	if (inp->inp_sp != NULL)
1197		ipsec_delete_pcbpolicy(inp);
1198#endif
1199	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
1200	in_pcbremlists(inp);
1201#ifdef INET6
1202	if (inp->inp_vflag & INP_IPV6PROTO) {
1203		ip6_freepcbopts(inp->in6p_outputopts);
1204		if (inp->in6p_moptions != NULL)
1205			ip6_freemoptions(inp->in6p_moptions);
1206	}
1207#endif
1208	if (inp->inp_options)
1209		(void)m_free(inp->inp_options);
1210#ifdef INET
1211	if (inp->inp_moptions != NULL)
1212		inp_freemoptions(inp->inp_moptions);
1213#endif
1214	inp->inp_vflag = 0;
1215	inp->inp_flags2 |= INP_FREED;
1216	crfree(inp->inp_cred);
1217#ifdef MAC
1218	mac_inpcb_destroy(inp);
1219#endif
1220	if (!in_pcbrele_wlocked(inp))
1221		INP_WUNLOCK(inp);
1222}
1223
1224/*
1225 * in_pcbdrop() removes an inpcb from hashed lists, releasing its address and
1226 * port reservation, and preventing it from being returned by inpcb lookups.
1227 *
1228 * It is used by TCP to mark an inpcb as unused and avoid future packet
1229 * delivery or event notification when a socket remains open but TCP has
1230 * closed.  This might occur as a result of a shutdown()-initiated TCP close
1231 * or a RST on the wire, and allows the port binding to be reused while still
1232 * maintaining the invariant that so_pcb always points to a valid inpcb until
1233 * in_pcbdetach().
1234 *
1235 * XXXRW: Possibly in_pcbdrop() should also prevent future notifications by
1236 * in_pcbnotifyall() and in_pcbpurgeif0()?
1237 */
1238void
1239in_pcbdrop(struct inpcb *inp)
1240{
1241
1242	INP_WLOCK_ASSERT(inp);
1243
1244	/*
1245	 * XXXRW: Possibly we should protect the setting of INP_DROPPED with
1246	 * the hash lock...?
1247	 */
1248	inp->inp_flags |= INP_DROPPED;
1249	if (inp->inp_flags & INP_INHASHLIST) {
1250		struct inpcbport *phd = inp->inp_phd;
1251
1252		INP_HASH_WLOCK(inp->inp_pcbinfo);
1253		LIST_REMOVE(inp, inp_hash);
1254		LIST_REMOVE(inp, inp_portlist);
1255		if (LIST_FIRST(&phd->phd_pcblist) == NULL) {
1256			LIST_REMOVE(phd, phd_hash);
1257			free(phd, M_PCB);
1258		}
1259		INP_HASH_WUNLOCK(inp->inp_pcbinfo);
1260		inp->inp_flags &= ~INP_INHASHLIST;
1261#ifdef PCBGROUP
1262		in_pcbgroup_remove(inp);
1263#endif
1264	}
1265}
1266
1267#ifdef INET
1268/*
1269 * Common routines to return the socket addresses associated with inpcbs.
1270 */
1271struct sockaddr *
1272in_sockaddr(in_port_t port, struct in_addr *addr_p)
1273{
1274	struct sockaddr_in *sin;
1275
1276	sin = malloc(sizeof *sin, M_SONAME,
1277		M_WAITOK | M_ZERO);
1278	sin->sin_family = AF_INET;
1279	sin->sin_len = sizeof(*sin);
1280	sin->sin_addr = *addr_p;
1281	sin->sin_port = port;
1282
1283	return (struct sockaddr *)sin;
1284}
1285
1286int
1287in_getsockaddr(struct socket *so, struct sockaddr **nam)
1288{
1289	struct inpcb *inp;
1290	struct in_addr addr;
1291	in_port_t port;
1292
1293	inp = sotoinpcb(so);
1294	KASSERT(inp != NULL, ("in_getsockaddr: inp == NULL"));
1295
1296	INP_RLOCK(inp);
1297	port = inp->inp_lport;
1298	addr = inp->inp_laddr;
1299	INP_RUNLOCK(inp);
1300
1301	*nam = in_sockaddr(port, &addr);
1302	return 0;
1303}
1304
1305int
1306in_getpeeraddr(struct socket *so, struct sockaddr **nam)
1307{
1308	struct inpcb *inp;
1309	struct in_addr addr;
1310	in_port_t port;
1311
1312	inp = sotoinpcb(so);
1313	KASSERT(inp != NULL, ("in_getpeeraddr: inp == NULL"));
1314
1315	INP_RLOCK(inp);
1316	port = inp->inp_fport;
1317	addr = inp->inp_faddr;
1318	INP_RUNLOCK(inp);
1319
1320	*nam = in_sockaddr(port, &addr);
1321	return 0;
1322}
1323
1324void
1325in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, int errno,
1326    struct inpcb *(*notify)(struct inpcb *, int))
1327{
1328	struct inpcb *inp, *inp_temp;
1329
1330	INP_INFO_WLOCK(pcbinfo);
1331	LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, inp_temp) {
1332		INP_WLOCK(inp);
1333#ifdef INET6
1334		if ((inp->inp_vflag & INP_IPV4) == 0) {
1335			INP_WUNLOCK(inp);
1336			continue;
1337		}
1338#endif
1339		if (inp->inp_faddr.s_addr != faddr.s_addr ||
1340		    inp->inp_socket == NULL) {
1341			INP_WUNLOCK(inp);
1342			continue;
1343		}
1344		if ((*notify)(inp, errno))
1345			INP_WUNLOCK(inp);
1346	}
1347	INP_INFO_WUNLOCK(pcbinfo);
1348}
1349
1350void
1351in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
1352{
1353	struct inpcb *inp;
1354	struct ip_moptions *imo;
1355	int i, gap;
1356
1357	INP_INFO_RLOCK(pcbinfo);
1358	LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1359		INP_WLOCK(inp);
1360		imo = inp->inp_moptions;
1361		if ((inp->inp_vflag & INP_IPV4) &&
1362		    imo != NULL) {
1363			/*
1364			 * Unselect the outgoing interface if it is being
1365			 * detached.
1366			 */
1367			if (imo->imo_multicast_ifp == ifp)
1368				imo->imo_multicast_ifp = NULL;
1369
1370			/*
1371			 * Drop multicast group membership if we joined
1372			 * through the interface being detached.
1373			 */
1374			for (i = 0, gap = 0; i < imo->imo_num_memberships;
1375			    i++) {
1376				if (imo->imo_membership[i]->inm_ifp == ifp) {
1377					in_delmulti(imo->imo_membership[i]);
1378					gap++;
1379				} else if (gap != 0)
1380					imo->imo_membership[i - gap] =
1381					    imo->imo_membership[i];
1382			}
1383			imo->imo_num_memberships -= gap;
1384		}
1385		INP_WUNLOCK(inp);
1386	}
1387	INP_INFO_RUNLOCK(pcbinfo);
1388}
1389
1390/*
1391 * Lookup a PCB based on the local address and port.  Caller must hold the
1392 * hash lock.  No inpcb locks or references are acquired.
1393 */
1394#define INP_LOOKUP_MAPPED_PCB_COST	3
1395struct inpcb *
1396in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
1397    u_short lport, int lookupflags, struct ucred *cred)
1398{
1399	struct inpcb *inp;
1400#ifdef INET6
1401	int matchwild = 3 + INP_LOOKUP_MAPPED_PCB_COST;
1402#else
1403	int matchwild = 3;
1404#endif
1405	int wildcard;
1406
1407	KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0,
1408	    ("%s: invalid lookup flags %d", __func__, lookupflags));
1409
1410	INP_HASH_LOCK_ASSERT(pcbinfo);
1411
1412	if ((lookupflags & INPLOOKUP_WILDCARD) == 0) {
1413		struct inpcbhead *head;
1414		/*
1415		 * Look for an unconnected (wildcard foreign addr) PCB that
1416		 * matches the local address and port we're looking for.
1417		 */
1418		head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
1419		    0, pcbinfo->ipi_hashmask)];
1420		LIST_FOREACH(inp, head, inp_hash) {
1421#ifdef INET6
1422			/* XXX inp locking */
1423			if ((inp->inp_vflag & INP_IPV4) == 0)
1424				continue;
1425#endif
1426			if (inp->inp_faddr.s_addr == INADDR_ANY &&
1427			    inp->inp_laddr.s_addr == laddr.s_addr &&
1428			    inp->inp_lport == lport) {
1429				/*
1430				 * Found?
1431				 */
1432				if (cred == NULL ||
1433				    prison_equal_ip4(cred->cr_prison,
1434					inp->inp_cred->cr_prison))
1435					return (inp);
1436			}
1437		}
1438		/*
1439		 * Not found.
1440		 */
1441		return (NULL);
1442	} else {
1443		struct inpcbporthead *porthash;
1444		struct inpcbport *phd;
1445		struct inpcb *match = NULL;
1446		/*
1447		 * Best fit PCB lookup.
1448		 *
1449		 * First see if this local port is in use by looking on the
1450		 * port hash list.
1451		 */
1452		porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
1453		    pcbinfo->ipi_porthashmask)];
1454		LIST_FOREACH(phd, porthash, phd_hash) {
1455			if (phd->phd_port == lport)
1456				break;
1457		}
1458		if (phd != NULL) {
1459			/*
1460			 * Port is in use by one or more PCBs. Look for best
1461			 * fit.
1462			 */
1463			LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
1464				wildcard = 0;
1465				if (cred != NULL &&
1466				    !prison_equal_ip4(inp->inp_cred->cr_prison,
1467					cred->cr_prison))
1468					continue;
1469#ifdef INET6
1470				/* XXX inp locking */
1471				if ((inp->inp_vflag & INP_IPV4) == 0)
1472					continue;
1473				/*
1474				 * We never select the PCB that has
1475				 * INP_IPV6 flag and is bound to :: if
1476				 * we have another PCB which is bound
1477				 * to 0.0.0.0.  If a PCB has the
1478				 * INP_IPV6 flag, then we set its cost
1479				 * higher than IPv4 only PCBs.
1480				 *
1481				 * Note that the case only happens
1482				 * when a socket is bound to ::, under
1483				 * the condition that the use of the
1484				 * mapped address is allowed.
1485				 */
1486				if ((inp->inp_vflag & INP_IPV6) != 0)
1487					wildcard += INP_LOOKUP_MAPPED_PCB_COST;
1488#endif
1489				if (inp->inp_faddr.s_addr != INADDR_ANY)
1490					wildcard++;
1491				if (inp->inp_laddr.s_addr != INADDR_ANY) {
1492					if (laddr.s_addr == INADDR_ANY)
1493						wildcard++;
1494					else if (inp->inp_laddr.s_addr != laddr.s_addr)
1495						continue;
1496				} else {
1497					if (laddr.s_addr != INADDR_ANY)
1498						wildcard++;
1499				}
1500				if (wildcard < matchwild) {
1501					match = inp;
1502					matchwild = wildcard;
1503					if (matchwild == 0)
1504						break;
1505				}
1506			}
1507		}
1508		return (match);
1509	}
1510}
1511#undef INP_LOOKUP_MAPPED_PCB_COST
1512
1513#ifdef PCBGROUP
1514/*
1515 * Lookup PCB in hash list, using pcbgroup tables.
1516 */
1517static struct inpcb *
1518in_pcblookup_group(struct inpcbinfo *pcbinfo, struct inpcbgroup *pcbgroup,
1519    struct in_addr faddr, u_int fport_arg, struct in_addr laddr,
1520    u_int lport_arg, int lookupflags, struct ifnet *ifp)
1521{
1522	struct inpcbhead *head;
1523	struct inpcb *inp, *tmpinp;
1524	u_short fport = fport_arg, lport = lport_arg;
1525
1526	/*
1527	 * First look for an exact match.
1528	 */
1529	tmpinp = NULL;
1530	INP_GROUP_LOCK(pcbgroup);
1531	head = &pcbgroup->ipg_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
1532	    pcbgroup->ipg_hashmask)];
1533	LIST_FOREACH(inp, head, inp_pcbgrouphash) {
1534#ifdef INET6
1535		/* XXX inp locking */
1536		if ((inp->inp_vflag & INP_IPV4) == 0)
1537			continue;
1538#endif
1539		if (inp->inp_faddr.s_addr == faddr.s_addr &&
1540		    inp->inp_laddr.s_addr == laddr.s_addr &&
1541		    inp->inp_fport == fport &&
1542		    inp->inp_lport == lport) {
1543			/*
1544			 * XXX We should be able to directly return
1545			 * the inp here, without any checks.
1546			 * Well unless both bound with SO_REUSEPORT?
1547			 */
1548			if (prison_flag(inp->inp_cred, PR_IP4))
1549				goto found;
1550			if (tmpinp == NULL)
1551				tmpinp = inp;
1552		}
1553	}
1554	if (tmpinp != NULL) {
1555		inp = tmpinp;
1556		goto found;
1557	}
1558
1559	/*
1560	 * Then look for a wildcard match, if requested.
1561	 */
1562	if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1563		struct inpcb *local_wild = NULL, *local_exact = NULL;
1564#ifdef INET6
1565		struct inpcb *local_wild_mapped = NULL;
1566#endif
1567		struct inpcb *jail_wild = NULL;
1568		struct inpcbhead *head;
1569		int injail;
1570
1571		/*
1572		 * Order of socket selection - we always prefer jails.
1573		 *      1. jailed, non-wild.
1574		 *      2. jailed, wild.
1575		 *      3. non-jailed, non-wild.
1576		 *      4. non-jailed, wild.
1577		 */
1578		head = &pcbinfo->ipi_wildbase[INP_PCBHASH(INADDR_ANY, lport,
1579		    0, pcbinfo->ipi_wildmask)];
1580		LIST_FOREACH(inp, head, inp_pcbgroup_wild) {
1581#ifdef INET6
1582			/* XXX inp locking */
1583			if ((inp->inp_vflag & INP_IPV4) == 0)
1584				continue;
1585#endif
1586			if (inp->inp_faddr.s_addr != INADDR_ANY ||
1587			    inp->inp_lport != lport)
1588				continue;
1589
1590			/* XXX inp locking */
1591			if (ifp && ifp->if_type == IFT_FAITH &&
1592			    (inp->inp_flags & INP_FAITH) == 0)
1593				continue;
1594
1595			injail = prison_flag(inp->inp_cred, PR_IP4);
1596			if (injail) {
1597				if (prison_check_ip4(inp->inp_cred,
1598				    &laddr) != 0)
1599					continue;
1600			} else {
1601				if (local_exact != NULL)
1602					continue;
1603			}
1604
1605			if (inp->inp_laddr.s_addr == laddr.s_addr) {
1606				if (injail)
1607					goto found;
1608				else
1609					local_exact = inp;
1610			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1611#ifdef INET6
1612				/* XXX inp locking, NULL check */
1613				if (inp->inp_vflag & INP_IPV6PROTO)
1614					local_wild_mapped = inp;
1615				else
1616#endif
1617					if (injail)
1618						jail_wild = inp;
1619					else
1620						local_wild = inp;
1621			}
1622		} /* LIST_FOREACH */
1623		inp = jail_wild;
1624		if (inp == NULL)
1625			inp = local_exact;
1626		if (inp == NULL)
1627			inp = local_wild;
1628#ifdef INET6
1629		if (inp == NULL)
1630			inp = local_wild_mapped;
1631#endif
1632		if (inp != NULL)
1633			goto found;
1634	} /* if (lookupflags & INPLOOKUP_WILDCARD) */
1635	INP_GROUP_UNLOCK(pcbgroup);
1636	return (NULL);
1637
1638found:
1639	in_pcbref(inp);
1640	INP_GROUP_UNLOCK(pcbgroup);
1641	if (lookupflags & INPLOOKUP_WLOCKPCB) {
1642		INP_WLOCK(inp);
1643		if (in_pcbrele_wlocked(inp))
1644			return (NULL);
1645	} else if (lookupflags & INPLOOKUP_RLOCKPCB) {
1646		INP_RLOCK(inp);
1647		if (in_pcbrele_rlocked(inp))
1648			return (NULL);
1649	} else
1650		panic("%s: locking bug", __func__);
1651	return (inp);
1652}
1653#endif /* PCBGROUP */
1654
1655/*
1656 * Lookup PCB in hash list, using pcbinfo tables.  This variation assumes
1657 * that the caller has locked the hash list, and will not perform any further
1658 * locking or reference operations on either the hash list or the connection.
1659 */
1660static struct inpcb *
1661in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1662    u_int fport_arg, struct in_addr laddr, u_int lport_arg, int lookupflags,
1663    struct ifnet *ifp)
1664{
1665	struct inpcbhead *head;
1666	struct inpcb *inp, *tmpinp;
1667	u_short fport = fport_arg, lport = lport_arg;
1668
1669	KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0,
1670	    ("%s: invalid lookup flags %d", __func__, lookupflags));
1671
1672	INP_HASH_LOCK_ASSERT(pcbinfo);
1673
1674	/*
1675	 * First look for an exact match.
1676	 */
1677	tmpinp = NULL;
1678	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
1679	    pcbinfo->ipi_hashmask)];
1680	LIST_FOREACH(inp, head, inp_hash) {
1681#ifdef INET6
1682		/* XXX inp locking */
1683		if ((inp->inp_vflag & INP_IPV4) == 0)
1684			continue;
1685#endif
1686		if (inp->inp_faddr.s_addr == faddr.s_addr &&
1687		    inp->inp_laddr.s_addr == laddr.s_addr &&
1688		    inp->inp_fport == fport &&
1689		    inp->inp_lport == lport) {
1690			/*
1691			 * XXX We should be able to directly return
1692			 * the inp here, without any checks.
1693			 * Well unless both bound with SO_REUSEPORT?
1694			 */
1695			if (prison_flag(inp->inp_cred, PR_IP4))
1696				return (inp);
1697			if (tmpinp == NULL)
1698				tmpinp = inp;
1699		}
1700	}
1701	if (tmpinp != NULL)
1702		return (tmpinp);
1703
1704	/*
1705	 * Then look for a wildcard match, if requested.
1706	 */
1707	if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1708		struct inpcb *local_wild = NULL, *local_exact = NULL;
1709#ifdef INET6
1710		struct inpcb *local_wild_mapped = NULL;
1711#endif
1712		struct inpcb *jail_wild = NULL;
1713		int injail;
1714
1715		/*
1716		 * Order of socket selection - we always prefer jails.
1717		 *      1. jailed, non-wild.
1718		 *      2. jailed, wild.
1719		 *      3. non-jailed, non-wild.
1720		 *      4. non-jailed, wild.
1721		 */
1722
1723		head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
1724		    0, pcbinfo->ipi_hashmask)];
1725		LIST_FOREACH(inp, head, inp_hash) {
1726#ifdef INET6
1727			/* XXX inp locking */
1728			if ((inp->inp_vflag & INP_IPV4) == 0)
1729				continue;
1730#endif
1731			if (inp->inp_faddr.s_addr != INADDR_ANY ||
1732			    inp->inp_lport != lport)
1733				continue;
1734
1735			/* XXX inp locking */
1736			if (ifp && ifp->if_type == IFT_FAITH &&
1737			    (inp->inp_flags & INP_FAITH) == 0)
1738				continue;
1739
1740			injail = prison_flag(inp->inp_cred, PR_IP4);
1741			if (injail) {
1742				if (prison_check_ip4(inp->inp_cred,
1743				    &laddr) != 0)
1744					continue;
1745			} else {
1746				if (local_exact != NULL)
1747					continue;
1748			}
1749
1750			if (inp->inp_laddr.s_addr == laddr.s_addr) {
1751				if (injail)
1752					return (inp);
1753				else
1754					local_exact = inp;
1755			} else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1756#ifdef INET6
1757				/* XXX inp locking, NULL check */
1758				if (inp->inp_vflag & INP_IPV6PROTO)
1759					local_wild_mapped = inp;
1760				else
1761#endif
1762					if (injail)
1763						jail_wild = inp;
1764					else
1765						local_wild = inp;
1766			}
1767		} /* LIST_FOREACH */
1768		if (jail_wild != NULL)
1769			return (jail_wild);
1770		if (local_exact != NULL)
1771			return (local_exact);
1772		if (local_wild != NULL)
1773			return (local_wild);
1774#ifdef INET6
1775		if (local_wild_mapped != NULL)
1776			return (local_wild_mapped);
1777#endif
1778	} /* if ((lookupflags & INPLOOKUP_WILDCARD) != 0) */
1779
1780	return (NULL);
1781}
1782
1783/*
1784 * Lookup PCB in hash list, using pcbinfo tables.  This variation locks the
1785 * hash list lock, and will return the inpcb locked (i.e., requires
1786 * INPLOOKUP_LOCKPCB).
1787 */
1788static struct inpcb *
1789in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1790    u_int fport, struct in_addr laddr, u_int lport, int lookupflags,
1791    struct ifnet *ifp)
1792{
1793	struct inpcb *inp;
1794
1795	INP_HASH_RLOCK(pcbinfo);
1796	inp = in_pcblookup_hash_locked(pcbinfo, faddr, fport, laddr, lport,
1797	    (lookupflags & ~(INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)), ifp);
1798	if (inp != NULL) {
1799		in_pcbref(inp);
1800		INP_HASH_RUNLOCK(pcbinfo);
1801		if (lookupflags & INPLOOKUP_WLOCKPCB) {
1802			INP_WLOCK(inp);
1803			if (in_pcbrele_wlocked(inp))
1804				return (NULL);
1805		} else if (lookupflags & INPLOOKUP_RLOCKPCB) {
1806			INP_RLOCK(inp);
1807			if (in_pcbrele_rlocked(inp))
1808				return (NULL);
1809		} else
1810			panic("%s: locking bug", __func__);
1811	} else
1812		INP_HASH_RUNLOCK(pcbinfo);
1813	return (inp);
1814}
1815
1816/*
1817 * Public inpcb lookup routines, accepting a 4-tuple, and optionally, an mbuf
1818 * from which a pre-calculated hash value may be extracted.
1819 *
1820 * Possibly more of this logic should be in in_pcbgroup.c.
1821 */
1822struct inpcb *
1823in_pcblookup(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport,
1824    struct in_addr laddr, u_int lport, int lookupflags, struct ifnet *ifp)
1825{
1826#if defined(PCBGROUP)
1827	struct inpcbgroup *pcbgroup;
1828#endif
1829
1830	KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0,
1831	    ("%s: invalid lookup flags %d", __func__, lookupflags));
1832	KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0,
1833	    ("%s: LOCKPCB not set", __func__));
1834
1835#if defined(PCBGROUP)
1836	if (in_pcbgroup_enabled(pcbinfo)) {
1837		pcbgroup = in_pcbgroup_bytuple(pcbinfo, laddr, lport, faddr,
1838		    fport);
1839		return (in_pcblookup_group(pcbinfo, pcbgroup, faddr, fport,
1840		    laddr, lport, lookupflags, ifp));
1841	}
1842#endif
1843	return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport,
1844	    lookupflags, ifp));
1845}
1846
1847struct inpcb *
1848in_pcblookup_mbuf(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1849    u_int fport, struct in_addr laddr, u_int lport, int lookupflags,
1850    struct ifnet *ifp, struct mbuf *m)
1851{
1852#ifdef PCBGROUP
1853	struct inpcbgroup *pcbgroup;
1854#endif
1855
1856	KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0,
1857	    ("%s: invalid lookup flags %d", __func__, lookupflags));
1858	KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0,
1859	    ("%s: LOCKPCB not set", __func__));
1860
1861#ifdef PCBGROUP
1862	if (in_pcbgroup_enabled(pcbinfo)) {
1863		pcbgroup = in_pcbgroup_byhash(pcbinfo, M_HASHTYPE_GET(m),
1864		    m->m_pkthdr.flowid);
1865		if (pcbgroup != NULL)
1866			return (in_pcblookup_group(pcbinfo, pcbgroup, faddr,
1867			    fport, laddr, lport, lookupflags, ifp));
1868		pcbgroup = in_pcbgroup_bytuple(pcbinfo, laddr, lport, faddr,
1869		    fport);
1870		return (in_pcblookup_group(pcbinfo, pcbgroup, faddr, fport,
1871		    laddr, lport, lookupflags, ifp));
1872	}
1873#endif
1874	return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport,
1875	    lookupflags, ifp));
1876}
1877#endif /* INET */
1878
1879/*
1880 * Insert PCB onto various hash lists.
1881 */
1882static int
1883in_pcbinshash_internal(struct inpcb *inp, int do_pcbgroup_update)
1884{
1885	struct inpcbhead *pcbhash;
1886	struct inpcbporthead *pcbporthash;
1887	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
1888	struct inpcbport *phd;
1889	u_int32_t hashkey_faddr;
1890
1891	INP_WLOCK_ASSERT(inp);
1892	INP_HASH_WLOCK_ASSERT(pcbinfo);
1893
1894	KASSERT((inp->inp_flags & INP_INHASHLIST) == 0,
1895	    ("in_pcbinshash: INP_INHASHLIST"));
1896
1897#ifdef INET6
1898	if (inp->inp_vflag & INP_IPV6)
1899		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
1900	else
1901#endif
1902	hashkey_faddr = inp->inp_faddr.s_addr;
1903
1904	pcbhash = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr,
1905		 inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)];
1906
1907	pcbporthash = &pcbinfo->ipi_porthashbase[
1908	    INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_porthashmask)];
1909
1910	/*
1911	 * Go through port list and look for a head for this lport.
1912	 */
1913	LIST_FOREACH(phd, pcbporthash, phd_hash) {
1914		if (phd->phd_port == inp->inp_lport)
1915			break;
1916	}
1917	/*
1918	 * If none exists, malloc one and tack it on.
1919	 */
1920	if (phd == NULL) {
1921		phd = malloc(sizeof(struct inpcbport), M_PCB, M_NOWAIT);
1922		if (phd == NULL) {
1923			return (ENOBUFS); /* XXX */
1924		}
1925		phd->phd_port = inp->inp_lport;
1926		LIST_INIT(&phd->phd_pcblist);
1927		LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
1928	}
1929	inp->inp_phd = phd;
1930	LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
1931	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
1932	inp->inp_flags |= INP_INHASHLIST;
1933#ifdef PCBGROUP
1934	if (do_pcbgroup_update)
1935		in_pcbgroup_update(inp);
1936#endif
1937	return (0);
1938}
1939
1940/*
1941 * For now, there are two public interfaces to insert an inpcb into the hash
 * lists -- one that updates pcbgroups, and one that doesn't.  The latter is
 * used only in the TCP syncache, where in_pcbinshash is called before the
 * full 4-tuple is set for the inpcb, so we don't want to install it in the
 * pcbgroup until later.
1946 *
1947 * XXXRW: This seems like a misfeature.  in_pcbinshash should always update
1948 * connection groups, and partially initialised inpcbs should not be exposed
1949 * to either reservation hash tables or pcbgroups.
1950 */
1951int
1952in_pcbinshash(struct inpcb *inp)
1953{
1954
1955	return (in_pcbinshash_internal(inp, 1));
1956}
1957
1958int
1959in_pcbinshash_nopcbgroup(struct inpcb *inp)
1960{
1961
1962	return (in_pcbinshash_internal(inp, 0));
1963}
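
/*
 * Illustrative sketch (helper name invented for the example): per the
 * assertions in in_pcbinshash_internal(), a caller must hold both the
 * inpcb write lock and the pcbinfo hash write lock across the insertion.
 */
#if 0	/* Example only; not compiled. */
static int
example_insert(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	int error;

	INP_WLOCK_ASSERT(inp);
	INP_HASH_WLOCK(pcbinfo);
	error = in_pcbinshash(inp);
	INP_HASH_WUNLOCK(pcbinfo);
	return (error);
}
#endif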
1964
1965/*
 * Move PCB to the proper hash bucket when { faddr, fport } have been
1967 * changed. NOTE: This does not handle the case of the lport changing (the
1968 * hashed port list would have to be updated as well), so the lport must
1969 * not change after in_pcbinshash() has been called.
1970 */
1971void
1972in_pcbrehash_mbuf(struct inpcb *inp, struct mbuf *m)
1973{
1974	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
1975	struct inpcbhead *head;
1976	u_int32_t hashkey_faddr;
1977
1978	INP_WLOCK_ASSERT(inp);
1979	INP_HASH_WLOCK_ASSERT(pcbinfo);
1980
1981	KASSERT(inp->inp_flags & INP_INHASHLIST,
1982	    ("in_pcbrehash: !INP_INHASHLIST"));
1983
1984#ifdef INET6
1985	if (inp->inp_vflag & INP_IPV6)
1986		hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
1987	else
1988#endif
1989	hashkey_faddr = inp->inp_faddr.s_addr;
1990
1991	head = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr,
1992		inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)];
1993
1994	LIST_REMOVE(inp, inp_hash);
1995	LIST_INSERT_HEAD(head, inp, inp_hash);
1996
1997#ifdef PCBGROUP
1998	if (m != NULL)
1999		in_pcbgroup_update_mbuf(inp, m);
2000	else
2001		in_pcbgroup_update(inp);
2002#endif
2003}
2004
2005void
2006in_pcbrehash(struct inpcb *inp)
2007{
2008
2009	in_pcbrehash_mbuf(inp, NULL);
2010}
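
/*
 * Illustrative sketch (IPv4 case; helper name invented for the example):
 * a connect-style path fills in the foreign address and port and then
 * rehashes the PCB, holding the inpcb write lock and the pcbinfo hash
 * write lock.  The local port is assumed to be set already and unchanged.
 */
#if 0	/* Example only; not compiled. */
static void
example_set_foreign(struct inpcb *inp, struct in_addr faddr, u_short fport)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;

	INP_WLOCK_ASSERT(inp);
	INP_HASH_WLOCK(pcbinfo);
	inp->inp_faddr = faddr;
	inp->inp_fport = fport;
	in_pcbrehash(inp);
	INP_HASH_WUNLOCK(pcbinfo);
}
#endif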
2011
2012/*
2013 * Remove PCB from various lists.
2014 */
2015static void
2016in_pcbremlists(struct inpcb *inp)
2017{
2018	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2019
2020	INP_INFO_WLOCK_ASSERT(pcbinfo);
2021	INP_WLOCK_ASSERT(inp);
2022
2023	inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
2024	if (inp->inp_flags & INP_INHASHLIST) {
2025		struct inpcbport *phd = inp->inp_phd;
2026
2027		INP_HASH_WLOCK(pcbinfo);
2028		LIST_REMOVE(inp, inp_hash);
2029		LIST_REMOVE(inp, inp_portlist);
2030		if (LIST_FIRST(&phd->phd_pcblist) == NULL) {
2031			LIST_REMOVE(phd, phd_hash);
2032			free(phd, M_PCB);
2033		}
2034		INP_HASH_WUNLOCK(pcbinfo);
2035		inp->inp_flags &= ~INP_INHASHLIST;
2036	}
2037	LIST_REMOVE(inp, inp_list);
2038	pcbinfo->ipi_count--;
2039#ifdef PCBGROUP
2040	in_pcbgroup_remove(inp);
2041#endif
2042}
2043
2044/*
 * A set label operation has occurred at the socket layer; propagate the
2046 * label change into the in_pcb for the socket.
2047 */
2048void
2049in_pcbsosetlabel(struct socket *so)
2050{
2051#ifdef MAC
2052	struct inpcb *inp;
2053
2054	inp = sotoinpcb(so);
2055	KASSERT(inp != NULL, ("in_pcbsosetlabel: so->so_pcb == NULL"));
2056
2057	INP_WLOCK(inp);
2058	SOCK_LOCK(so);
2059	mac_inpcb_sosetlabel(so, inp);
2060	SOCK_UNLOCK(so);
2061	INP_WUNLOCK(inp);
2062#endif
2063}
2064
2065/*
 * ipport_tick runs once per second, determining whether random port
 * allocation should continue.  If more than ipport_randomcps ports have
 * been allocated in the last second, then we return to sequential port
 * allocation.  We return to random allocation only once we drop below
 * ipport_randomcps for at least ipport_randomtime seconds.
2071 */
2072static void
2073ipport_tick(void *xtp)
2074{
2075	VNET_ITERATOR_DECL(vnet_iter);
2076
2077	VNET_LIST_RLOCK_NOSLEEP();
2078	VNET_FOREACH(vnet_iter) {
2079		CURVNET_SET(vnet_iter);	/* XXX appease INVARIANTS here */
2080		if (V_ipport_tcpallocs <=
2081		    V_ipport_tcplastcount + V_ipport_randomcps) {
2082			if (V_ipport_stoprandom > 0)
2083				V_ipport_stoprandom--;
2084		} else
2085			V_ipport_stoprandom = V_ipport_randomtime;
2086		V_ipport_tcplastcount = V_ipport_tcpallocs;
2087		CURVNET_RESTORE();
2088	}
2089	VNET_LIST_RUNLOCK_NOSLEEP();
2090	callout_reset(&ipport_tick_callout, hz, ipport_tick, NULL);
2091}
2092
2093static void
2094ip_fini(void *xtp)
2095{
2096
2097	callout_stop(&ipport_tick_callout);
2098}
2099
2100/*
 * The ipport_tick callout should start running at about the time we attach
 * the inet or inet6 domains.
2103 */
2104static void
2105ipport_tick_init(const void *unused __unused)
2106{
2107
2108	/* Start ipport_tick. */
2109	callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
2110	callout_reset(&ipport_tick_callout, 1, ipport_tick, NULL);
2111	EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
2112		SHUTDOWN_PRI_DEFAULT);
2113}
2114SYSINIT(ipport_tick_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE,
2115    ipport_tick_init, NULL);
2116
2117void
2118inp_wlock(struct inpcb *inp)
2119{
2120
2121	INP_WLOCK(inp);
2122}
2123
2124void
2125inp_wunlock(struct inpcb *inp)
2126{
2127
2128	INP_WUNLOCK(inp);
2129}
2130
2131void
2132inp_rlock(struct inpcb *inp)
2133{
2134
2135	INP_RLOCK(inp);
2136}
2137
2138void
2139inp_runlock(struct inpcb *inp)
2140{
2141
2142	INP_RUNLOCK(inp);
2143}
2144
2145#ifdef INVARIANTS
2146void
2147inp_lock_assert(struct inpcb *inp)
2148{
2149
2150	INP_WLOCK_ASSERT(inp);
2151}
2152
2153void
2154inp_unlock_assert(struct inpcb *inp)
2155{
2156
2157	INP_UNLOCK_ASSERT(inp);
2158}
2159#endif
2160
2161void
2162inp_apply_all(void (*func)(struct inpcb *, void *), void *arg)
2163{
2164	struct inpcb *inp;
2165
2166	INP_INFO_RLOCK(&V_tcbinfo);
2167	LIST_FOREACH(inp, V_tcbinfo.ipi_listhead, inp_list) {
2168		INP_WLOCK(inp);
2169		func(inp, arg);
2170		INP_WUNLOCK(inp);
2171	}
2172	INP_INFO_RUNLOCK(&V_tcbinfo);
2173}
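
/*
 * Illustrative sketch (names invented for the example): inp_apply_all()
 * write-locks each TCP inpcb before invoking the callback, so the callback
 * may examine or modify the inpcb but must not drop the lock or sleep.
 */
#if 0	/* Example only; not compiled. */
static void
example_count_cb(struct inpcb *inp, void *arg)
{
	int *countp = arg;

	(*countp)++;
}

static int
example_count_tcp_pcbs(void)
{
	int count = 0;

	inp_apply_all(example_count_cb, &count);
	return (count);
}
#endif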
2174
2175struct socket *
2176inp_inpcbtosocket(struct inpcb *inp)
2177{
2178
2179	INP_WLOCK_ASSERT(inp);
2180	return (inp->inp_socket);
2181}
2182
2183struct tcpcb *
2184inp_inpcbtotcpcb(struct inpcb *inp)
2185{
2186
2187	INP_WLOCK_ASSERT(inp);
2188	return ((struct tcpcb *)inp->inp_ppcb);
2189}
2190
2191int
2192inp_ip_tos_get(const struct inpcb *inp)
2193{
2194
2195	return (inp->inp_ip_tos);
2196}
2197
2198void
2199inp_ip_tos_set(struct inpcb *inp, int val)
2200{
2201
2202	inp->inp_ip_tos = val;
2203}
2204
2205void
2206inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
2207    uint32_t *faddr, uint16_t *fp)
2208{
2209
2210	INP_LOCK_ASSERT(inp);
2211	*laddr = inp->inp_laddr.s_addr;
2212	*faddr = inp->inp_faddr.s_addr;
2213	*lp = inp->inp_lport;
2214	*fp = inp->inp_fport;
2215}
2216
2217struct inpcb *
2218so_sotoinpcb(struct socket *so)
2219{
2220
2221	return (sotoinpcb(so));
2222}
2223
2224struct tcpcb *
2225so_sototcpcb(struct socket *so)
2226{
2227
2228	return (sototcpcb(so));
2229}
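
/*
 * Illustrative sketch (helper name invented for the example): the wrapper
 * functions above let code compiled without the inpcb-internal locking
 * macros -- for instance, an offload module -- lock an inpcb and reach
 * its tcpcb.
 */
#if 0	/* Example only; not compiled. */
static void
example_peek_tcpcb(struct socket *so)
{
	struct inpcb *inp;
	struct tcpcb *tp;

	inp = so_sotoinpcb(so);
	inp_wlock(inp);
	tp = inp_inpcbtotcpcb(inp);
	/* ... inspect or adjust the tcpcb here ... */
	inp_wunlock(inp);
}
#endif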
2230
2231#ifdef DDB
2232static void
2233db_print_indent(int indent)
2234{
2235	int i;
2236
2237	for (i = 0; i < indent; i++)
2238		db_printf(" ");
2239}
2240
2241static void
2242db_print_inconninfo(struct in_conninfo *inc, const char *name, int indent)
2243{
2244	char faddr_str[48], laddr_str[48];
2245
2246	db_print_indent(indent);
2247	db_printf("%s at %p\n", name, inc);
2248
2249	indent += 2;
2250
2251#ifdef INET6
2252	if (inc->inc_flags & INC_ISIPV6) {
2253		/* IPv6. */
2254		ip6_sprintf(laddr_str, &inc->inc6_laddr);
2255		ip6_sprintf(faddr_str, &inc->inc6_faddr);
2256	} else
2257#endif
2258	{
2259		/* IPv4. */
2260		inet_ntoa_r(inc->inc_laddr, laddr_str);
2261		inet_ntoa_r(inc->inc_faddr, faddr_str);
2262	}
2263	db_print_indent(indent);
2264	db_printf("inc_laddr %s   inc_lport %u\n", laddr_str,
2265	    ntohs(inc->inc_lport));
2266	db_print_indent(indent);
2267	db_printf("inc_faddr %s   inc_fport %u\n", faddr_str,
2268	    ntohs(inc->inc_fport));
2269}
2270
2271static void
2272db_print_inpflags(int inp_flags)
2273{
2274	int comma;
2275
2276	comma = 0;
2277	if (inp_flags & INP_RECVOPTS) {
2278		db_printf("%sINP_RECVOPTS", comma ? ", " : "");
2279		comma = 1;
2280	}
2281	if (inp_flags & INP_RECVRETOPTS) {
2282		db_printf("%sINP_RECVRETOPTS", comma ? ", " : "");
2283		comma = 1;
2284	}
2285	if (inp_flags & INP_RECVDSTADDR) {
2286		db_printf("%sINP_RECVDSTADDR", comma ? ", " : "");
2287		comma = 1;
2288	}
2289	if (inp_flags & INP_HDRINCL) {
2290		db_printf("%sINP_HDRINCL", comma ? ", " : "");
2291		comma = 1;
2292	}
2293	if (inp_flags & INP_HIGHPORT) {
2294		db_printf("%sINP_HIGHPORT", comma ? ", " : "");
2295		comma = 1;
2296	}
2297	if (inp_flags & INP_LOWPORT) {
2298		db_printf("%sINP_LOWPORT", comma ? ", " : "");
2299		comma = 1;
2300	}
2301	if (inp_flags & INP_ANONPORT) {
2302		db_printf("%sINP_ANONPORT", comma ? ", " : "");
2303		comma = 1;
2304	}
2305	if (inp_flags & INP_RECVIF) {
2306		db_printf("%sINP_RECVIF", comma ? ", " : "");
2307		comma = 1;
2308	}
2309	if (inp_flags & INP_MTUDISC) {
2310		db_printf("%sINP_MTUDISC", comma ? ", " : "");
2311		comma = 1;
2312	}
2313	if (inp_flags & INP_FAITH) {
2314		db_printf("%sINP_FAITH", comma ? ", " : "");
2315		comma = 1;
2316	}
2317	if (inp_flags & INP_RECVTTL) {
2318		db_printf("%sINP_RECVTTL", comma ? ", " : "");
2319		comma = 1;
2320	}
2321	if (inp_flags & INP_DONTFRAG) {
2322		db_printf("%sINP_DONTFRAG", comma ? ", " : "");
2323		comma = 1;
2324	}
2325	if (inp_flags & INP_RECVTOS) {
2326		db_printf("%sINP_RECVTOS", comma ? ", " : "");
2327		comma = 1;
2328	}
2329	if (inp_flags & IN6P_IPV6_V6ONLY) {
2330		db_printf("%sIN6P_IPV6_V6ONLY", comma ? ", " : "");
2331		comma = 1;
2332	}
2333	if (inp_flags & IN6P_PKTINFO) {
2334		db_printf("%sIN6P_PKTINFO", comma ? ", " : "");
2335		comma = 1;
2336	}
2337	if (inp_flags & IN6P_HOPLIMIT) {
2338		db_printf("%sIN6P_HOPLIMIT", comma ? ", " : "");
2339		comma = 1;
2340	}
2341	if (inp_flags & IN6P_HOPOPTS) {
2342		db_printf("%sIN6P_HOPOPTS", comma ? ", " : "");
2343		comma = 1;
2344	}
2345	if (inp_flags & IN6P_DSTOPTS) {
2346		db_printf("%sIN6P_DSTOPTS", comma ? ", " : "");
2347		comma = 1;
2348	}
2349	if (inp_flags & IN6P_RTHDR) {
2350		db_printf("%sIN6P_RTHDR", comma ? ", " : "");
2351		comma = 1;
2352	}
2353	if (inp_flags & IN6P_RTHDRDSTOPTS) {
2354		db_printf("%sIN6P_RTHDRDSTOPTS", comma ? ", " : "");
2355		comma = 1;
2356	}
2357	if (inp_flags & IN6P_TCLASS) {
2358		db_printf("%sIN6P_TCLASS", comma ? ", " : "");
2359		comma = 1;
2360	}
2361	if (inp_flags & IN6P_AUTOFLOWLABEL) {
2362		db_printf("%sIN6P_AUTOFLOWLABEL", comma ? ", " : "");
2363		comma = 1;
2364	}
2365	if (inp_flags & INP_TIMEWAIT) {
2366		db_printf("%sINP_TIMEWAIT", comma ? ", " : "");
		comma = 1;
2368	}
2369	if (inp_flags & INP_ONESBCAST) {
2370		db_printf("%sINP_ONESBCAST", comma ? ", " : "");
		comma = 1;
2372	}
2373	if (inp_flags & INP_DROPPED) {
2374		db_printf("%sINP_DROPPED", comma ? ", " : "");
		comma = 1;
2376	}
2377	if (inp_flags & INP_SOCKREF) {
2378		db_printf("%sINP_SOCKREF", comma ? ", " : "");
		comma = 1;
2380	}
2381	if (inp_flags & IN6P_RFC2292) {
2382		db_printf("%sIN6P_RFC2292", comma ? ", " : "");
2383		comma = 1;
2384	}
2385	if (inp_flags & IN6P_MTU) {
		db_printf("%sIN6P_MTU", comma ? ", " : "");
2387		comma = 1;
2388	}
2389}
2390
2391static void
2392db_print_inpvflag(u_char inp_vflag)
2393{
2394	int comma;
2395
2396	comma = 0;
2397	if (inp_vflag & INP_IPV4) {
2398		db_printf("%sINP_IPV4", comma ? ", " : "");
		comma = 1;
2400	}
2401	if (inp_vflag & INP_IPV6) {
2402		db_printf("%sINP_IPV6", comma ? ", " : "");
		comma = 1;
2404	}
2405	if (inp_vflag & INP_IPV6PROTO) {
2406		db_printf("%sINP_IPV6PROTO", comma ? ", " : "");
		comma = 1;
2408	}
2409}
2410
2411static void
2412db_print_inpcb(struct inpcb *inp, const char *name, int indent)
2413{
2414
2415	db_print_indent(indent);
2416	db_printf("%s at %p\n", name, inp);
2417
2418	indent += 2;
2419
2420	db_print_indent(indent);
2421	db_printf("inp_flow: 0x%x\n", inp->inp_flow);
2422
2423	db_print_inconninfo(&inp->inp_inc, "inp_conninfo", indent);
2424
2425	db_print_indent(indent);
2426	db_printf("inp_ppcb: %p   inp_pcbinfo: %p   inp_socket: %p\n",
2427	    inp->inp_ppcb, inp->inp_pcbinfo, inp->inp_socket);
2428
2429	db_print_indent(indent);
2430	db_printf("inp_label: %p   inp_flags: 0x%x (",
2431	   inp->inp_label, inp->inp_flags);
2432	db_print_inpflags(inp->inp_flags);
2433	db_printf(")\n");
2434
2435	db_print_indent(indent);
2436	db_printf("inp_sp: %p   inp_vflag: 0x%x (", inp->inp_sp,
2437	    inp->inp_vflag);
2438	db_print_inpvflag(inp->inp_vflag);
2439	db_printf(")\n");
2440
2441	db_print_indent(indent);
2442	db_printf("inp_ip_ttl: %d   inp_ip_p: %d   inp_ip_minttl: %d\n",
2443	    inp->inp_ip_ttl, inp->inp_ip_p, inp->inp_ip_minttl);
2444
2445	db_print_indent(indent);
2446#ifdef INET6
2447	if (inp->inp_vflag & INP_IPV6) {
2448		db_printf("in6p_options: %p   in6p_outputopts: %p   "
2449		    "in6p_moptions: %p\n", inp->in6p_options,
2450		    inp->in6p_outputopts, inp->in6p_moptions);
2451		db_printf("in6p_icmp6filt: %p   in6p_cksum %d   "
2452		    "in6p_hops %u\n", inp->in6p_icmp6filt, inp->in6p_cksum,
2453		    inp->in6p_hops);
2454	} else
2455#endif
2456	{
2457		db_printf("inp_ip_tos: %d   inp_ip_options: %p   "
2458		    "inp_ip_moptions: %p\n", inp->inp_ip_tos,
2459		    inp->inp_options, inp->inp_moptions);
2460	}
2461
2462	db_print_indent(indent);
2463	db_printf("inp_phd: %p   inp_gencnt: %ju\n", inp->inp_phd,
2464	    (uintmax_t)inp->inp_gencnt);
2465}
2466
2467DB_SHOW_COMMAND(inpcb, db_show_inpcb)
2468{
2469	struct inpcb *inp;
2470
2471	if (!have_addr) {
2472		db_printf("usage: show inpcb <addr>\n");
2473		return;
2474	}
2475	inp = (struct inpcb *)addr;
2476
2477	db_print_inpcb(inp, "inpcb", 0);
2478}
2479#endif /* DDB */
2480