in_pcb.c: changes between r269280 and r271386
1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993, 1995
3 * The Regents of the University of California.
4 * Copyright (c) 2007-2009 Robert N. M. Watson
5 * Copyright (c) 2010-2011 Juniper Networks, Inc.
6 * All rights reserved.
7 *
8 * Portions of this software were developed by Robert N. M. Watson under
9 * contract to Juniper Networks, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)in_pcb.c 8.4 (Berkeley) 5/24/95
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: head/sys/netinet/in_pcb.c 269280 2014-07-29 23:42:51Z hiren $");
39__FBSDID("$FreeBSD: head/sys/netinet/in_pcb.c 271386 2014-09-10 12:35:42Z ae $");
40
41#include "opt_ddb.h"
42#include "opt_ipsec.h"
43#include "opt_inet.h"
44#include "opt_inet6.h"
45#include "opt_pcbgroup.h"
46#include "opt_rss.h"
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/malloc.h>
51#include <sys/mbuf.h>
52#include <sys/callout.h>
53#include <sys/domain.h>
54#include <sys/protosw.h>
55#include <sys/socket.h>
56#include <sys/socketvar.h>
57#include <sys/priv.h>
58#include <sys/proc.h>
59#include <sys/refcount.h>
60#include <sys/jail.h>
61#include <sys/kernel.h>
62#include <sys/sysctl.h>
63
64#ifdef DDB
65#include <ddb/ddb.h>
66#endif
67
68#include <vm/uma.h>
69
70#include <net/if.h>
71#include <net/if_var.h>
72#include <net/if_types.h>
73#include <net/route.h>
74#include <net/vnet.h>
75
76#if defined(INET) || defined(INET6)
77#include <netinet/in.h>
78#include <netinet/in_pcb.h>
79#include <netinet/in_rss.h>
80#include <netinet/ip_var.h>
81#include <netinet/tcp_var.h>
82#include <netinet/udp.h>
83#include <netinet/udp_var.h>
84#endif
85#ifdef INET
86#include <netinet/in_var.h>
87#endif
88#ifdef INET6
89#include <netinet/ip6.h>
90#include <netinet6/in6_pcb.h>
91#include <netinet6/in6_var.h>
92#include <netinet6/ip6_var.h>
93#endif /* INET6 */
94
95
96#ifdef IPSEC
97#include <netipsec/ipsec.h>
98#include <netipsec/key.h>
99#endif /* IPSEC */
100
101#include <security/mac/mac_framework.h>
102
103static struct callout ipport_tick_callout;
104
105/*
106 * These configure the range of local port addresses assigned to
107 * "unspecified" outgoing connections/packets/whatever.
108 */
109VNET_DEFINE(int, ipport_lowfirstauto) = IPPORT_RESERVED - 1; /* 1023 */
110VNET_DEFINE(int, ipport_lowlastauto) = IPPORT_RESERVEDSTART; /* 600 */
111VNET_DEFINE(int, ipport_firstauto) = IPPORT_EPHEMERALFIRST; /* 10000 */
112VNET_DEFINE(int, ipport_lastauto) = IPPORT_EPHEMERALLAST; /* 65535 */
113VNET_DEFINE(int, ipport_hifirstauto) = IPPORT_HIFIRSTAUTO; /* 49152 */
114VNET_DEFINE(int, ipport_hilastauto) = IPPORT_HILASTAUTO; /* 65535 */
115
116/*
117 * Reserved ports accessible only to root. There are significant
118 * security considerations that must be accounted for when changing these,
119 * but the security benefits can be great. Please be careful.
120 */
121VNET_DEFINE(int, ipport_reservedhigh) = IPPORT_RESERVED - 1; /* 1023 */
122VNET_DEFINE(int, ipport_reservedlow);
123
124/* Variables dealing with random ephemeral port allocation. */
125VNET_DEFINE(int, ipport_randomized) = 1; /* user controlled via sysctl */
126VNET_DEFINE(int, ipport_randomcps) = 10; /* user controlled via sysctl */
127VNET_DEFINE(int, ipport_randomtime) = 45; /* user controlled via sysctl */
128VNET_DEFINE(int, ipport_stoprandom); /* toggled by ipport_tick */
129VNET_DEFINE(int, ipport_tcpallocs);
130static VNET_DEFINE(int, ipport_tcplastcount);
131
132#define V_ipport_tcplastcount VNET(ipport_tcplastcount)
133
134static void in_pcbremlists(struct inpcb *inp);
135#ifdef INET
136static struct inpcb *in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo,
137 struct in_addr faddr, u_int fport_arg,
138 struct in_addr laddr, u_int lport_arg,
139 int lookupflags, struct ifnet *ifp);
140
 141#define RANGECHK(var, min, max) do {				\
 142	if ((var) < (min)) { (var) = (min); }			\
 143	else if ((var) > (max)) { (var) = (max); } } while (0)
144
145static int
146sysctl_net_ipport_check(SYSCTL_HANDLER_ARGS)
147{
148 int error;
149
150 error = sysctl_handle_int(oidp, arg1, arg2, req);
151 if (error == 0) {
152 RANGECHK(V_ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
153 RANGECHK(V_ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
154 RANGECHK(V_ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX);
155 RANGECHK(V_ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX);
156 RANGECHK(V_ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX);
157 RANGECHK(V_ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX);
158 }
159 return (error);
160}
161
162#undef RANGECHK
163
164static SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW, 0,
165 "IP Ports");
166
167SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
168 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowfirstauto), 0,
169 &sysctl_net_ipport_check, "I", "");
170SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
171 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowlastauto), 0,
172 &sysctl_net_ipport_check, "I", "");
173SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, first,
174 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_firstauto), 0,
175 &sysctl_net_ipport_check, "I", "");
176SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, last,
177 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lastauto), 0,
178 &sysctl_net_ipport_check, "I", "");
179SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
180 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hifirstauto), 0,
181 &sysctl_net_ipport_check, "I", "");
182SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
183 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hilastauto), 0,
184 &sysctl_net_ipport_check, "I", "");
185SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedhigh,
186 CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedhigh), 0, "");
187SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedlow,
188 CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedlow), 0, "");
189SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomized, CTLFLAG_RW,
190 &VNET_NAME(ipport_randomized), 0, "Enable random port allocation");
191SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomcps, CTLFLAG_RW,
192 &VNET_NAME(ipport_randomcps), 0, "Maximum number of random port "
193 "allocations before switching to a sequental one");
194SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomtime, CTLFLAG_RW,
195 &VNET_NAME(ipport_randomtime), 0,
196 "Minimum time to keep sequental port "
197 "allocation before switching to a random one");
198#endif /* INET */
199
200/*
201 * in_pcb.c: manage the Protocol Control Blocks.
202 *
203 * NOTE: It is assumed that most of these functions will be called with
204 * the pcbinfo lock held, and often, the inpcb lock held, as these utility
205 * functions often modify hash chains or addresses in pcbs.
206 */
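/*
 * Informal locking overview, as used throughout this file: the global
 * pcbinfo lock (INP_INFO_*LOCK) protects the inpcb lists and allocation,
 * the pcbinfo hash lock (INP_HASH_*LOCK) protects the lookup hash chains,
 * and the per-connection lock (INP_RLOCK/INP_WLOCK) protects an individual
 * inpcb.  Most routines below assert which of these the caller must hold.
 */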
207
208/*
209 * Initialize an inpcbinfo -- we should be able to reduce the number of
210 * arguments in time.
211 */
212void
213in_pcbinfo_init(struct inpcbinfo *pcbinfo, const char *name,
214 struct inpcbhead *listhead, int hash_nelements, int porthash_nelements,
215 char *inpcbzone_name, uma_init inpcbzone_init, uma_fini inpcbzone_fini,
216 uint32_t inpcbzone_flags, u_int hashfields)
217{
218
219 INP_INFO_LOCK_INIT(pcbinfo, name);
220 INP_HASH_LOCK_INIT(pcbinfo, "pcbinfohash"); /* XXXRW: argument? */
221#ifdef VIMAGE
222 pcbinfo->ipi_vnet = curvnet;
223#endif
224 pcbinfo->ipi_listhead = listhead;
225 LIST_INIT(pcbinfo->ipi_listhead);
226 pcbinfo->ipi_count = 0;
227 pcbinfo->ipi_hashbase = hashinit(hash_nelements, M_PCB,
228 &pcbinfo->ipi_hashmask);
229 pcbinfo->ipi_porthashbase = hashinit(porthash_nelements, M_PCB,
230 &pcbinfo->ipi_porthashmask);
231#ifdef PCBGROUP
232 in_pcbgroup_init(pcbinfo, hashfields, hash_nelements);
233#endif
234 pcbinfo->ipi_zone = uma_zcreate(inpcbzone_name, sizeof(struct inpcb),
235 NULL, NULL, inpcbzone_init, inpcbzone_fini, UMA_ALIGN_PTR,
236 inpcbzone_flags);
237 uma_zone_set_max(pcbinfo->ipi_zone, maxsockets);
238 uma_zone_set_warning(pcbinfo->ipi_zone,
239 "kern.ipc.maxsockets limit reached");
240}
241
242/*
243 * Destroy an inpcbinfo.
244 */
245void
246in_pcbinfo_destroy(struct inpcbinfo *pcbinfo)
247{
248
249 KASSERT(pcbinfo->ipi_count == 0,
250 ("%s: ipi_count = %u", __func__, pcbinfo->ipi_count));
251
252 hashdestroy(pcbinfo->ipi_hashbase, M_PCB, pcbinfo->ipi_hashmask);
253 hashdestroy(pcbinfo->ipi_porthashbase, M_PCB,
254 pcbinfo->ipi_porthashmask);
255#ifdef PCBGROUP
256 in_pcbgroup_destroy(pcbinfo);
257#endif
258 uma_zdestroy(pcbinfo->ipi_zone);
259 INP_HASH_LOCK_DESTROY(pcbinfo);
260 INP_INFO_LOCK_DESTROY(pcbinfo);
261}
262
263/*
264 * Allocate a PCB and associate it with the socket.
265 * On success return with the PCB locked.
266 */
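/*
 * Illustrative caller-side sketch (roughly what a protocol attach routine,
 * e.g. a UDP-style pr_attach, does; names here are for illustration only):
 *
 *	INP_INFO_WLOCK(pcbinfo);
 *	error = in_pcballoc(so, pcbinfo);
 *	if (error) {
 *		INP_INFO_WUNLOCK(pcbinfo);
 *		return (error);
 *	}
 *	inp = sotoinpcb(so);
 *	... protocol-specific initialization ...
 *	INP_WUNLOCK(inp);		released here; returned locked above
 *	INP_INFO_WUNLOCK(pcbinfo);
 */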
267int
268in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
269{
270 struct inpcb *inp;
271 int error;
272
273 INP_INFO_WLOCK_ASSERT(pcbinfo);
274 error = 0;
275 inp = uma_zalloc(pcbinfo->ipi_zone, M_NOWAIT);
276 if (inp == NULL)
277 return (ENOBUFS);
278 bzero(inp, inp_zero_size);
279 inp->inp_pcbinfo = pcbinfo;
280 inp->inp_socket = so;
281 inp->inp_cred = crhold(so->so_cred);
282 inp->inp_inc.inc_fibnum = so->so_fibnum;
283#ifdef MAC
284 error = mac_inpcb_init(inp, M_NOWAIT);
285 if (error != 0)
286 goto out;
287 mac_inpcb_create(so, inp);
288#endif
289#ifdef IPSEC
290 error = ipsec_init_policy(so, &inp->inp_sp);
291 if (error != 0) {
292#ifdef MAC
293 mac_inpcb_destroy(inp);
294#endif
295 goto out;
296 }
297#endif /*IPSEC*/
298#ifdef INET6
299 if (INP_SOCKAF(so) == AF_INET6) {
300 inp->inp_vflag |= INP_IPV6PROTO;
301 if (V_ip6_v6only)
302 inp->inp_flags |= IN6P_IPV6_V6ONLY;
303 }
304#endif
305 LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
306 pcbinfo->ipi_count++;
307 so->so_pcb = (caddr_t)inp;
308#ifdef INET6
309 if (V_ip6_auto_flowlabel)
310 inp->inp_flags |= IN6P_AUTOFLOWLABEL;
311#endif
312 INP_WLOCK(inp);
313 inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
314 refcount_init(&inp->inp_refcount, 1); /* Reference from inpcbinfo */
315#if defined(IPSEC) || defined(MAC)
316out:
317 if (error != 0) {
318 crfree(inp->inp_cred);
319 uma_zfree(pcbinfo->ipi_zone, inp);
320 }
321#endif
322 return (error);
323}
324
325#ifdef INET
326int
327in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
328{
329 int anonport, error;
330
331 INP_WLOCK_ASSERT(inp);
332 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
333
334 if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY)
335 return (EINVAL);
336 anonport = nam == NULL || ((struct sockaddr_in *)nam)->sin_port == 0;
337 error = in_pcbbind_setup(inp, nam, &inp->inp_laddr.s_addr,
338 &inp->inp_lport, cred);
339 if (error)
340 return (error);
341 if (in_pcbinshash(inp) != 0) {
342 inp->inp_laddr.s_addr = INADDR_ANY;
343 inp->inp_lport = 0;
344 return (EAGAIN);
345 }
346 if (anonport)
347 inp->inp_flags |= INP_ANONPORT;
348 return (0);
349}
350#endif
351
352/*
353 * Select a local port (number) to use.
354 */
355#if defined(INET) || defined(INET6)
356int
357in_pcb_lport(struct inpcb *inp, struct in_addr *laddrp, u_short *lportp,
358 struct ucred *cred, int lookupflags)
359{
360 struct inpcbinfo *pcbinfo;
361 struct inpcb *tmpinp;
362 unsigned short *lastport;
363 int count, dorandom, error;
364 u_short aux, first, last, lport;
365#ifdef INET
366 struct in_addr laddr;
367#endif
368
369 pcbinfo = inp->inp_pcbinfo;
370
371 /*
372 * Because no actual state changes occur here, a global write lock on
373 * the pcbinfo isn't required.
374 */
375 INP_LOCK_ASSERT(inp);
376 INP_HASH_LOCK_ASSERT(pcbinfo);
377
378 if (inp->inp_flags & INP_HIGHPORT) {
379 first = V_ipport_hifirstauto; /* sysctl */
380 last = V_ipport_hilastauto;
381 lastport = &pcbinfo->ipi_lasthi;
382 } else if (inp->inp_flags & INP_LOWPORT) {
383 error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
384 if (error)
385 return (error);
386 first = V_ipport_lowfirstauto; /* 1023 */
387 last = V_ipport_lowlastauto; /* 600 */
388 lastport = &pcbinfo->ipi_lastlow;
389 } else {
390 first = V_ipport_firstauto; /* sysctl */
391 last = V_ipport_lastauto;
392 lastport = &pcbinfo->ipi_lastport;
393 }
394 /*
395 * For UDP(-Lite), use random port allocation as long as the user
396 * allows it. For TCP (and as of yet unknown) connections,
397 * use random port allocation only if the user allows it AND
398 * ipport_tick() allows it.
399 */
400 if (V_ipport_randomized &&
401 (!V_ipport_stoprandom || pcbinfo == &V_udbinfo ||
402 pcbinfo == &V_ulitecbinfo))
403 dorandom = 1;
404 else
405 dorandom = 0;
406 /*
407 * It makes no sense to do random port allocation if
408 * we have the only port available.
409 */
410 if (first == last)
411 dorandom = 0;
412 /* Make sure to not include UDP(-Lite) packets in the count. */
413 if (pcbinfo != &V_udbinfo || pcbinfo != &V_ulitecbinfo)
414 V_ipport_tcpallocs++;
415 /*
416 * Instead of having two loops further down counting up or down
417 * make sure that first is always <= last and go with only one
418 * code path implementing all logic.
419 */
420 if (first > last) {
421 aux = first;
422 first = last;
423 last = aux;
424 }
425
426#ifdef INET
427 /* Make the compiler happy. */
428 laddr.s_addr = 0;
429 if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4) {
430 KASSERT(laddrp != NULL, ("%s: laddrp NULL for v4 inp %p",
431 __func__, inp));
432 laddr = *laddrp;
433 }
434#endif
435 tmpinp = NULL; /* Make compiler happy. */
436 lport = *lportp;
437
438 if (dorandom)
439 *lastport = first + (arc4random() % (last - first));
440
441 count = last - first;
442
443 do {
444 if (count-- < 0) /* completely used? */
445 return (EADDRNOTAVAIL);
446 ++*lastport;
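		/* Wrap around to 'first' if we stepped outside [first, last]. */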
447 if (*lastport < first || *lastport > last)
448 *lastport = first;
449 lport = htons(*lastport);
450
451#ifdef INET6
452 if ((inp->inp_vflag & INP_IPV6) != 0)
453 tmpinp = in6_pcblookup_local(pcbinfo,
454 &inp->in6p_laddr, lport, lookupflags, cred);
455#endif
456#if defined(INET) && defined(INET6)
457 else
458#endif
459#ifdef INET
460 tmpinp = in_pcblookup_local(pcbinfo, laddr,
461 lport, lookupflags, cred);
462#endif
463 } while (tmpinp != NULL);
464
465#ifdef INET
466 if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4)
467 laddrp->s_addr = laddr.s_addr;
468#endif
469 *lportp = lport;
470
471 return (0);
472}
473
474/*
475 * Return cached socket options.
476 */
477short
478inp_so_options(const struct inpcb *inp)
479{
480 short so_options;
481
482 so_options = 0;
483
484 if ((inp->inp_flags2 & INP_REUSEPORT) != 0)
485 so_options |= SO_REUSEPORT;
486 if ((inp->inp_flags2 & INP_REUSEADDR) != 0)
487 so_options |= SO_REUSEADDR;
488 return (so_options);
489}
490#endif /* INET || INET6 */
491
492/*
493 * Check if a new BINDMULTI socket is allowed to be created.
494 *
495 * ni points to the new inp.
 496 * oi points to the existing inp.
497 *
498 * This checks whether the existing inp also has BINDMULTI and
499 * whether the credentials match.
500 */
501int
502in_pcbbind_check_bindmulti(const struct inpcb *ni, const struct inpcb *oi)
503{
504 /* Check permissions match */
505 if ((ni->inp_flags2 & INP_BINDMULTI) &&
506 (ni->inp_cred->cr_uid !=
507 oi->inp_cred->cr_uid))
508 return (0);
509
510 /* Check the existing inp has BINDMULTI set */
511 if ((ni->inp_flags2 & INP_BINDMULTI) &&
512 ((oi->inp_flags2 & INP_BINDMULTI) == 0))
513 return (0);
514
515 /*
516 * We're okay - either INP_BINDMULTI isn't set on ni, or
517 * it is and it matches the checks.
518 */
519 return (1);
520}
521
522#ifdef INET
523/*
524 * Set up a bind operation on a PCB, performing port allocation
525 * as required, but do not actually modify the PCB. Callers can
526 * either complete the bind by setting inp_laddr/inp_lport and
527 * calling in_pcbinshash(), or they can just use the resulting
528 * port and address to authorise the sending of a once-off packet.
529 *
530 * On error, the values of *laddrp and *lportp are not changed.
531 */
532int
533in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
534 u_short *lportp, struct ucred *cred)
535{
536 struct socket *so = inp->inp_socket;
537 struct sockaddr_in *sin;
538 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
539 struct in_addr laddr;
540 u_short lport = 0;
541 int lookupflags = 0, reuseport = (so->so_options & SO_REUSEPORT);
542 int error;
543
544 /*
545 * No state changes, so read locks are sufficient here.
546 */
547 INP_LOCK_ASSERT(inp);
548 INP_HASH_LOCK_ASSERT(pcbinfo);
549
550 if (TAILQ_EMPTY(&V_in_ifaddrhead)) /* XXX broken! */
551 return (EADDRNOTAVAIL);
552 laddr.s_addr = *laddrp;
553 if (nam != NULL && laddr.s_addr != INADDR_ANY)
554 return (EINVAL);
555 if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
556 lookupflags = INPLOOKUP_WILDCARD;
557 if (nam == NULL) {
558 if ((error = prison_local_ip4(cred, &laddr)) != 0)
559 return (error);
560 } else {
561 sin = (struct sockaddr_in *)nam;
562 if (nam->sa_len != sizeof (*sin))
563 return (EINVAL);
564#ifdef notdef
565 /*
566 * We should check the family, but old programs
567 * incorrectly fail to initialize it.
568 */
569 if (sin->sin_family != AF_INET)
570 return (EAFNOSUPPORT);
571#endif
572 error = prison_local_ip4(cred, &sin->sin_addr);
573 if (error)
574 return (error);
575 if (sin->sin_port != *lportp) {
576 /* Don't allow the port to change. */
577 if (*lportp != 0)
578 return (EINVAL);
579 lport = sin->sin_port;
580 }
581 /* NB: lport is left as 0 if the port isn't being changed. */
582 if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
583 /*
584 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
585 * allow complete duplication of binding if
586 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
587 * and a multicast address is bound on both
588 * new and duplicated sockets.
589 */
590 if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) != 0)
591 reuseport = SO_REUSEADDR|SO_REUSEPORT;
592 } else if (sin->sin_addr.s_addr != INADDR_ANY) {
593 sin->sin_port = 0; /* yech... */
594 bzero(&sin->sin_zero, sizeof(sin->sin_zero));
595 /*
596 * Is the address a local IP address?
597 * If INP_BINDANY is set, then the socket may be bound
598 * to any endpoint address, local or not.
599 */
600 if ((inp->inp_flags & INP_BINDANY) == 0 &&
601 ifa_ifwithaddr_check((struct sockaddr *)sin) == 0)
602 return (EADDRNOTAVAIL);
603 }
604 laddr = sin->sin_addr;
605 if (lport) {
606 struct inpcb *t;
607 struct tcptw *tw;
608
609 /* GROSS */
610 if (ntohs(lport) <= V_ipport_reservedhigh &&
611 ntohs(lport) >= V_ipport_reservedlow &&
612 priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT,
613 0))
614 return (EACCES);
615 if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) &&
616 priv_check_cred(inp->inp_cred,
617 PRIV_NETINET_REUSEPORT, 0) != 0) {
618 t = in_pcblookup_local(pcbinfo, sin->sin_addr,
619 lport, INPLOOKUP_WILDCARD, cred);
620 /*
621 * XXX
622 * This entire block sorely needs a rewrite.
623 */
624 if (t &&
625 ((inp->inp_flags2 & INP_BINDMULTI) == 0) &&
626 ((t->inp_flags & INP_TIMEWAIT) == 0) &&
627 (so->so_type != SOCK_STREAM ||
628 ntohl(t->inp_faddr.s_addr) == INADDR_ANY) &&
629 (ntohl(sin->sin_addr.s_addr) != INADDR_ANY ||
630 ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
631 (t->inp_flags2 & INP_REUSEPORT) == 0) &&
632 (inp->inp_cred->cr_uid !=
633 t->inp_cred->cr_uid))
634 return (EADDRINUSE);
635
636 /*
637 * If the socket is a BINDMULTI socket, then
638 * the credentials need to match and the
639 * original socket also has to have been bound
640 * with BINDMULTI.
641 */
642 if (t && (! in_pcbbind_check_bindmulti(inp, t)))
643 return (EADDRINUSE);
644 }
645 t = in_pcblookup_local(pcbinfo, sin->sin_addr,
646 lport, lookupflags, cred);
647 if (t && (t->inp_flags & INP_TIMEWAIT)) {
648 /*
 649 * XXXRW: If an inpcb has had its timewait
650 * state recycled, we treat the address as
651 * being in use (for now). This is better
652 * than a panic, but not desirable.
653 */
654 tw = intotw(t);
655 if (tw == NULL ||
656 (reuseport & tw->tw_so_options) == 0)
657 return (EADDRINUSE);
658 } else if (t &&
659 ((inp->inp_flags2 & INP_BINDMULTI) == 0) &&
660 (reuseport & inp_so_options(t)) == 0) {
661#ifdef INET6
662 if (ntohl(sin->sin_addr.s_addr) !=
663 INADDR_ANY ||
664 ntohl(t->inp_laddr.s_addr) !=
665 INADDR_ANY ||
666 (inp->inp_vflag & INP_IPV6PROTO) == 0 ||
667 (t->inp_vflag & INP_IPV6PROTO) == 0)
668#endif
669 return (EADDRINUSE);
670 if (t && (! in_pcbbind_check_bindmulti(inp, t)))
671 return (EADDRINUSE);
672 }
673 }
674 }
675 if (*lportp != 0)
676 lport = *lportp;
677 if (lport == 0) {
678 error = in_pcb_lport(inp, &laddr, &lport, cred, lookupflags);
679 if (error != 0)
680 return (error);
681
682 }
683 *laddrp = laddr.s_addr;
684 *lportp = lport;
685 return (0);
686}
687
688/*
689 * Connect from a socket to a specified address.
690 * Both address and port must be specified in argument sin.
 691 * If we don't have a local address for this socket yet,
692 * then pick one.
693 */
694int
695in_pcbconnect_mbuf(struct inpcb *inp, struct sockaddr *nam,
696 struct ucred *cred, struct mbuf *m)
697{
698 u_short lport, fport;
699 in_addr_t laddr, faddr;
700 int anonport, error;
701
702 INP_WLOCK_ASSERT(inp);
703 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
704
705 lport = inp->inp_lport;
706 laddr = inp->inp_laddr.s_addr;
707 anonport = (lport == 0);
708 error = in_pcbconnect_setup(inp, nam, &laddr, &lport, &faddr, &fport,
709 NULL, cred);
710 if (error)
711 return (error);
712
713 /* Do the initial binding of the local address if required. */
714 if (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0) {
715 inp->inp_lport = lport;
716 inp->inp_laddr.s_addr = laddr;
717 if (in_pcbinshash(inp) != 0) {
718 inp->inp_laddr.s_addr = INADDR_ANY;
719 inp->inp_lport = 0;
720 return (EAGAIN);
721 }
722 }
723
724 /* Commit the remaining changes. */
725 inp->inp_lport = lport;
726 inp->inp_laddr.s_addr = laddr;
727 inp->inp_faddr.s_addr = faddr;
728 inp->inp_fport = fport;
729 in_pcbrehash_mbuf(inp, m);
730
731 if (anonport)
732 inp->inp_flags |= INP_ANONPORT;
733 return (0);
734}
735
736int
737in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
738{
739
740 return (in_pcbconnect_mbuf(inp, nam, cred, NULL));
741}
742
743/*
744 * Do proper source address selection on an unbound socket in case
745 * of connect. Take jails into account as well.
746 */
747int
748in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_addr *laddr,
749 struct ucred *cred)
750{
751 struct ifaddr *ifa;
752 struct sockaddr *sa;
753 struct sockaddr_in *sin;
754 struct route sro;
755 int error;
756
757 KASSERT(laddr != NULL, ("%s: laddr NULL", __func__));
758
759 /*
760 * Bypass source address selection and use the primary jail IP
761 * if requested.
762 */
763 if (cred != NULL && !prison_saddrsel_ip4(cred, laddr))
764 return (0);
765
766 error = 0;
767 bzero(&sro, sizeof(sro));
768
769 sin = (struct sockaddr_in *)&sro.ro_dst;
770 sin->sin_family = AF_INET;
771 sin->sin_len = sizeof(struct sockaddr_in);
772 sin->sin_addr.s_addr = faddr->s_addr;
773
774 /*
 775 * If the route is known, our source address is taken from the
 776 * outgoing interface; otherwise punt.
 777 *
 778 * Find the route to the destination.
779 */
780 if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0)
781 in_rtalloc_ign(&sro, 0, inp->inp_inc.inc_fibnum);
782
783 /*
784 * If we found a route, use the address corresponding to
785 * the outgoing interface.
786 *
787 * Otherwise assume faddr is reachable on a directly connected
788 * network and try to find a corresponding interface to take
789 * the source address from.
790 */
791 if (sro.ro_rt == NULL || sro.ro_rt->rt_ifp == NULL) {
792 struct in_ifaddr *ia;
793 struct ifnet *ifp;
794
795 ia = ifatoia(ifa_ifwithdstaddr((struct sockaddr *)sin));
796 if (ia == NULL)
797 ia = ifatoia(ifa_ifwithnet((struct sockaddr *)sin, 0));
798 if (ia == NULL) {
799 error = ENETUNREACH;
800 goto done;
801 }
802
803 if (cred == NULL || !prison_flag(cred, PR_IP4)) {
804 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
805 ifa_free(&ia->ia_ifa);
806 goto done;
807 }
808
809 ifp = ia->ia_ifp;
810 ifa_free(&ia->ia_ifa);
811 ia = NULL;
812 IF_ADDR_RLOCK(ifp);
813 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
814
815 sa = ifa->ifa_addr;
816 if (sa->sa_family != AF_INET)
817 continue;
818 sin = (struct sockaddr_in *)sa;
819 if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
820 ia = (struct in_ifaddr *)ifa;
821 break;
822 }
823 }
824 if (ia != NULL) {
825 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
826 IF_ADDR_RUNLOCK(ifp);
827 goto done;
828 }
829 IF_ADDR_RUNLOCK(ifp);
830
831 /* 3. As a last resort return the 'default' jail address. */
832 error = prison_get_ip4(cred, laddr);
833 goto done;
834 }
835
836 /*
837 * If the outgoing interface on the route found is not
838 * a loopback interface, use the address from that interface.
 839 * In the case of jails, do these three steps:
840 * 1. check if the interface address belongs to the jail. If so use it.
841 * 2. check if we have any address on the outgoing interface
842 * belonging to this jail. If so use it.
843 * 3. as a last resort return the 'default' jail address.
844 */
845 if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) {
846 struct in_ifaddr *ia;
847 struct ifnet *ifp;
848
849 /* If not jailed, use the default returned. */
850 if (cred == NULL || !prison_flag(cred, PR_IP4)) {
851 ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
852 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
853 goto done;
854 }
855
856 /* Jailed. */
857 /* 1. Check if the iface address belongs to the jail. */
858 sin = (struct sockaddr_in *)sro.ro_rt->rt_ifa->ifa_addr;
859 if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
860 ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
861 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
862 goto done;
863 }
864
865 /*
866 * 2. Check if we have any address on the outgoing interface
867 * belonging to this jail.
868 */
869 ia = NULL;
870 ifp = sro.ro_rt->rt_ifp;
871 IF_ADDR_RLOCK(ifp);
872 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
873 sa = ifa->ifa_addr;
874 if (sa->sa_family != AF_INET)
875 continue;
876 sin = (struct sockaddr_in *)sa;
877 if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
878 ia = (struct in_ifaddr *)ifa;
879 break;
880 }
881 }
882 if (ia != NULL) {
883 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
884 IF_ADDR_RUNLOCK(ifp);
885 goto done;
886 }
887 IF_ADDR_RUNLOCK(ifp);
888
889 /* 3. As a last resort return the 'default' jail address. */
890 error = prison_get_ip4(cred, laddr);
891 goto done;
892 }
893
894 /*
 895 * The outgoing interface is marked with the 'loopback net', so
 896 * this is a route to ourselves.
897 * Try to find the interface of the destination address and then
898 * take the address from there. That interface is not necessarily
899 * a loopback interface.
 900 * In the case of jails, check that the address belongs to the jail;
 901 * if we cannot find one, fall back to the 'default' jail address.
902 */
903 if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) != 0) {
904 struct sockaddr_in sain;
905 struct in_ifaddr *ia;
906
907 bzero(&sain, sizeof(struct sockaddr_in));
908 sain.sin_family = AF_INET;
909 sain.sin_len = sizeof(struct sockaddr_in);
910 sain.sin_addr.s_addr = faddr->s_addr;
911
912 ia = ifatoia(ifa_ifwithdstaddr(sintosa(&sain)));
913 if (ia == NULL)
914 ia = ifatoia(ifa_ifwithnet(sintosa(&sain), 0));
915 if (ia == NULL)
916 ia = ifatoia(ifa_ifwithaddr(sintosa(&sain)));
917
918 if (cred == NULL || !prison_flag(cred, PR_IP4)) {
919 if (ia == NULL) {
920 error = ENETUNREACH;
921 goto done;
922 }
923 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
924 ifa_free(&ia->ia_ifa);
925 goto done;
926 }
927
928 /* Jailed. */
929 if (ia != NULL) {
930 struct ifnet *ifp;
931
932 ifp = ia->ia_ifp;
933 ifa_free(&ia->ia_ifa);
934 ia = NULL;
935 IF_ADDR_RLOCK(ifp);
936 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
937
938 sa = ifa->ifa_addr;
939 if (sa->sa_family != AF_INET)
940 continue;
941 sin = (struct sockaddr_in *)sa;
942 if (prison_check_ip4(cred,
943 &sin->sin_addr) == 0) {
944 ia = (struct in_ifaddr *)ifa;
945 break;
946 }
947 }
948 if (ia != NULL) {
949 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
950 IF_ADDR_RUNLOCK(ifp);
951 goto done;
952 }
953 IF_ADDR_RUNLOCK(ifp);
954 }
955
956 /* 3. As a last resort return the 'default' jail address. */
957 error = prison_get_ip4(cred, laddr);
958 goto done;
959 }
960
961done:
962 if (sro.ro_rt != NULL)
963 RTFREE(sro.ro_rt);
964 return (error);
965}
966
967/*
968 * Set up for a connect from a socket to the specified address.
969 * On entry, *laddrp and *lportp should contain the current local
970 * address and port for the PCB; these are updated to the values
971 * that should be placed in inp_laddr and inp_lport to complete
972 * the connect.
973 *
974 * On success, *faddrp and *fportp will be set to the remote address
975 * and port. These are not updated in the error case.
976 *
977 * If the operation fails because the connection already exists,
978 * *oinpp will be set to the PCB of that connection so that the
979 * caller can decide to override it. In all other cases, *oinpp
980 * is set to NULL.
981 */
982int
983in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam,
984 in_addr_t *laddrp, u_short *lportp, in_addr_t *faddrp, u_short *fportp,
985 struct inpcb **oinpp, struct ucred *cred)
986{
987 struct sockaddr_in *sin = (struct sockaddr_in *)nam;
988 struct in_ifaddr *ia;
989 struct inpcb *oinp;
990 struct in_addr laddr, faddr;
991 u_short lport, fport;
992 int error;
993
994 /*
995 * Because a global state change doesn't actually occur here, a read
996 * lock is sufficient.
997 */
998 INP_LOCK_ASSERT(inp);
999 INP_HASH_LOCK_ASSERT(inp->inp_pcbinfo);
1000
1001 if (oinpp != NULL)
1002 *oinpp = NULL;
1003 if (nam->sa_len != sizeof (*sin))
1004 return (EINVAL);
1005 if (sin->sin_family != AF_INET)
1006 return (EAFNOSUPPORT);
1007 if (sin->sin_port == 0)
1008 return (EADDRNOTAVAIL);
1009 laddr.s_addr = *laddrp;
1010 lport = *lportp;
1011 faddr = sin->sin_addr;
1012 fport = sin->sin_port;
1013
1014 if (!TAILQ_EMPTY(&V_in_ifaddrhead)) {
1015 /*
1016 * If the destination address is INADDR_ANY,
1017 * use the primary local address.
1018 * If the supplied address is INADDR_BROADCAST,
1019 * and the primary interface supports broadcast,
1020 * choose the broadcast address for that interface.
1021 */
1022 if (faddr.s_addr == INADDR_ANY) {
1023 IN_IFADDR_RLOCK();
1024 faddr =
1025 IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr;
1026 IN_IFADDR_RUNLOCK();
1027 if (cred != NULL &&
1028 (error = prison_get_ip4(cred, &faddr)) != 0)
1029 return (error);
1030 } else if (faddr.s_addr == (u_long)INADDR_BROADCAST) {
1031 IN_IFADDR_RLOCK();
1032 if (TAILQ_FIRST(&V_in_ifaddrhead)->ia_ifp->if_flags &
1033 IFF_BROADCAST)
1034 faddr = satosin(&TAILQ_FIRST(
1035 &V_in_ifaddrhead)->ia_broadaddr)->sin_addr;
1036 IN_IFADDR_RUNLOCK();
1037 }
1038 }
1039 if (laddr.s_addr == INADDR_ANY) {
1040 error = in_pcbladdr(inp, &faddr, &laddr, cred);
1041 /*
1042 * If the destination address is multicast and an outgoing
1043 * interface has been set as a multicast option, prefer the
1044 * address of that interface as our source address.
1045 */
1046 if (IN_MULTICAST(ntohl(faddr.s_addr)) &&
1047 inp->inp_moptions != NULL) {
1048 struct ip_moptions *imo;
1049 struct ifnet *ifp;
1050
1051 imo = inp->inp_moptions;
1052 if (imo->imo_multicast_ifp != NULL) {
1053 ifp = imo->imo_multicast_ifp;
1054 IN_IFADDR_RLOCK();
1055 TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
1056 if ((ia->ia_ifp == ifp) &&
1057 (cred == NULL ||
1058 prison_check_ip4(cred,
1059 &ia->ia_addr.sin_addr) == 0))
1060 break;
1061 }
1062 if (ia == NULL)
1063 error = EADDRNOTAVAIL;
1064 else {
1065 laddr = ia->ia_addr.sin_addr;
1066 error = 0;
1067 }
1068 IN_IFADDR_RUNLOCK();
1069 }
1070 }
1071 if (error)
1072 return (error);
1073 }
1074 oinp = in_pcblookup_hash_locked(inp->inp_pcbinfo, faddr, fport,
1075 laddr, lport, 0, NULL);
1076 if (oinp != NULL) {
1077 if (oinpp != NULL)
1078 *oinpp = oinp;
1079 return (EADDRINUSE);
1080 }
1081 if (lport == 0) {
1082 error = in_pcbbind_setup(inp, NULL, &laddr.s_addr, &lport,
1083 cred);
1084 if (error)
1085 return (error);
1086 }
1087 *laddrp = laddr.s_addr;
1088 *lportp = lport;
1089 *faddrp = faddr.s_addr;
1090 *fportp = fport;
1091 return (0);
1092}
1093
1094void
1095in_pcbdisconnect(struct inpcb *inp)
1096{
1097
1098 INP_WLOCK_ASSERT(inp);
1099 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
1100
1101 inp->inp_faddr.s_addr = INADDR_ANY;
1102 inp->inp_fport = 0;
1103 in_pcbrehash(inp);
1104}
1105#endif /* INET */
1106
1107/*
 1108 * in_pcbdetach() is responsible for disassociating a socket from an inpcb.
1109 * For most protocols, this will be invoked immediately prior to calling
1110 * in_pcbfree(). However, with TCP the inpcb may significantly outlive the
1111 * socket, in which case in_pcbfree() is deferred.
1112 */
1113void
1114in_pcbdetach(struct inpcb *inp)
1115{
1116
1117 KASSERT(inp->inp_socket != NULL, ("%s: inp_socket == NULL", __func__));
1118
1119 inp->inp_socket->so_pcb = NULL;
1120 inp->inp_socket = NULL;
1121}
1122
1123/*
1124 * in_pcbref() bumps the reference count on an inpcb in order to maintain
1125 * stability of an inpcb pointer despite the inpcb lock being released. This
1126 * is used in TCP when the inpcbinfo lock needs to be acquired or upgraded,
 1127 * but where the inpcb lock may already be held, or when acquiring a reference
1128 * via a pcbgroup.
1129 *
1130 * in_pcbref() should be used only to provide brief memory stability, and
1131 * must always be followed by a call to INP_WLOCK() and in_pcbrele() to
1132 * garbage collect the inpcb if it has been in_pcbfree()'d from another
 1133 * context. Until in_pcbrele() has indicated that the inpcb is still valid,
1134 * lock and rele are the *only* safe operations that may be performed on the
1135 * inpcb.
1136 *
1137 * While the inpcb will not be freed, releasing the inpcb lock means that the
1138 * connection's state may change, so the caller should be careful to
1139 * revalidate any cached state on reacquiring the lock. Drop the reference
1140 * using in_pcbrele().
1141 */
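/*
 * Illustrative sketch of the pattern described above (a caller that must
 * drop the inpcb lock in order to acquire the pcbinfo lock first):
 *
 *	in_pcbref(inp);
 *	INP_WUNLOCK(inp);
 *	INP_INFO_WLOCK(pcbinfo);
 *	INP_WLOCK(inp);
 *	if (in_pcbrele_wlocked(inp)) {
 *		INP_INFO_WUNLOCK(pcbinfo);
 *		return;			inpcb was freed while unlocked
 *	}
 *	... inpcb is valid and write-locked; revalidate any cached state ...
 */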
1142void
1143in_pcbref(struct inpcb *inp)
1144{
1145
1146 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1147
1148 refcount_acquire(&inp->inp_refcount);
1149}
1150
1151/*
1152 * Drop a refcount on an inpcb elevated using in_pcbref(); because a call to
1153 * in_pcbfree() may have been made between in_pcbref() and in_pcbrele(), we
1154 * return a flag indicating whether or not the inpcb remains valid. If it is
1155 * valid, we return with the inpcb lock held.
1156 *
1157 * Notice that, unlike in_pcbref(), the inpcb lock must be held to drop a
1158 * reference on an inpcb. Historically more work was done here (actually, in
1159 * in_pcbfree_internal()) but has been moved to in_pcbfree() to avoid the
1160 * need for the pcbinfo lock in in_pcbrele(). Deferring the free is entirely
1161 * about memory stability (and continued use of the write lock).
1162 */
1163int
1164in_pcbrele_rlocked(struct inpcb *inp)
1165{
1166 struct inpcbinfo *pcbinfo;
1167
1168 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1169
1170 INP_RLOCK_ASSERT(inp);
1171
1172 if (refcount_release(&inp->inp_refcount) == 0) {
1173 /*
1174 * If the inpcb has been freed, let the caller know, even if
1175 * this isn't the last reference.
1176 */
1177 if (inp->inp_flags2 & INP_FREED) {
1178 INP_RUNLOCK(inp);
1179 return (1);
1180 }
1181 return (0);
1182 }
1183
1184 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1185
1186 INP_RUNLOCK(inp);
1187 pcbinfo = inp->inp_pcbinfo;
1188 uma_zfree(pcbinfo->ipi_zone, inp);
1189 return (1);
1190}
1191
1192int
1193in_pcbrele_wlocked(struct inpcb *inp)
1194{
1195 struct inpcbinfo *pcbinfo;
1196
1197 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1198
1199 INP_WLOCK_ASSERT(inp);
1200
1201 if (refcount_release(&inp->inp_refcount) == 0)
1202 return (0);
1203
1204 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1205
1206 INP_WUNLOCK(inp);
1207 pcbinfo = inp->inp_pcbinfo;
1208 uma_zfree(pcbinfo->ipi_zone, inp);
1209 return (1);
1210}
1211
1212/*
1213 * Temporary wrapper.
1214 */
1215int
1216in_pcbrele(struct inpcb *inp)
1217{
1218
1219 return (in_pcbrele_wlocked(inp));
1220}
1221
1222/*
1223 * Unconditionally schedule an inpcb to be freed by decrementing its
1224 * reference count, which should occur only after the inpcb has been detached
1225 * from its socket. If another thread holds a temporary reference (acquired
1226 * using in_pcbref()) then the free is deferred until that reference is
1227 * released using in_pcbrele(), but the inpcb is still unlocked. Almost all
1228 * work, including removal from global lists, is done in this context, where
1229 * the pcbinfo lock is held.
1230 */
1231void
1232in_pcbfree(struct inpcb *inp)
1233{
1234 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
1235
1236 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1237
1238 INP_INFO_WLOCK_ASSERT(pcbinfo);
1239 INP_WLOCK_ASSERT(inp);
1240
1241 /* XXXRW: Do as much as possible here. */
1242#ifdef IPSEC
1243 if (inp->inp_sp != NULL)
1244 ipsec_delete_pcbpolicy(inp);
1245#endif
1246 inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
1247 in_pcbremlists(inp);
1248#ifdef INET6
1249 if (inp->inp_vflag & INP_IPV6PROTO) {
1250 ip6_freepcbopts(inp->in6p_outputopts);
1251 if (inp->in6p_moptions != NULL)
1252 ip6_freemoptions(inp->in6p_moptions);
1253 }
1254#endif
1255 if (inp->inp_options)
1256 (void)m_free(inp->inp_options);
1257#ifdef INET
1258 if (inp->inp_moptions != NULL)
1259 inp_freemoptions(inp->inp_moptions);
1260#endif
1261 inp->inp_vflag = 0;
1262 inp->inp_flags2 |= INP_FREED;
1263 crfree(inp->inp_cred);
1264#ifdef MAC
1265 mac_inpcb_destroy(inp);
1266#endif
1267 if (!in_pcbrele_wlocked(inp))
1268 INP_WUNLOCK(inp);
1269}
1270
1271/*
1272 * in_pcbdrop() removes an inpcb from hashed lists, releasing its address and
1273 * port reservation, and preventing it from being returned by inpcb lookups.
1274 *
1275 * It is used by TCP to mark an inpcb as unused and avoid future packet
1276 * delivery or event notification when a socket remains open but TCP has
1277 * closed. This might occur as a result of a shutdown()-initiated TCP close
1278 * or a RST on the wire, and allows the port binding to be reused while still
1279 * maintaining the invariant that so_pcb always points to a valid inpcb until
1280 * in_pcbdetach().
1281 *
1282 * XXXRW: Possibly in_pcbdrop() should also prevent future notifications by
1283 * in_pcbnotifyall() and in_pcbpurgeif0()?
1284 */
1285void
1286in_pcbdrop(struct inpcb *inp)
1287{
1288
1289 INP_WLOCK_ASSERT(inp);
1290
1291 /*
1292 * XXXRW: Possibly we should protect the setting of INP_DROPPED with
1293 * the hash lock...?
1294 */
1295 inp->inp_flags |= INP_DROPPED;
1296 if (inp->inp_flags & INP_INHASHLIST) {
1297 struct inpcbport *phd = inp->inp_phd;
1298
1299 INP_HASH_WLOCK(inp->inp_pcbinfo);
1300 LIST_REMOVE(inp, inp_hash);
1301 LIST_REMOVE(inp, inp_portlist);
1302 if (LIST_FIRST(&phd->phd_pcblist) == NULL) {
1303 LIST_REMOVE(phd, phd_hash);
1304 free(phd, M_PCB);
1305 }
1306 INP_HASH_WUNLOCK(inp->inp_pcbinfo);
1307 inp->inp_flags &= ~INP_INHASHLIST;
1308#ifdef PCBGROUP
1309 in_pcbgroup_remove(inp);
1310#endif
1311 }
1312}
1313
1314#ifdef INET
1315/*
1316 * Common routines to return the socket addresses associated with inpcbs.
1317 */
1318struct sockaddr *
1319in_sockaddr(in_port_t port, struct in_addr *addr_p)
1320{
1321 struct sockaddr_in *sin;
1322
1323 sin = malloc(sizeof *sin, M_SONAME,
1324 M_WAITOK | M_ZERO);
1325 sin->sin_family = AF_INET;
1326 sin->sin_len = sizeof(*sin);
1327 sin->sin_addr = *addr_p;
1328 sin->sin_port = port;
1329
1330 return (struct sockaddr *)sin;
1331}
1332
1333int
1334in_getsockaddr(struct socket *so, struct sockaddr **nam)
1335{
1336 struct inpcb *inp;
1337 struct in_addr addr;
1338 in_port_t port;
1339
1340 inp = sotoinpcb(so);
1341 KASSERT(inp != NULL, ("in_getsockaddr: inp == NULL"));
1342
1343 INP_RLOCK(inp);
1344 port = inp->inp_lport;
1345 addr = inp->inp_laddr;
1346 INP_RUNLOCK(inp);
1347
1348 *nam = in_sockaddr(port, &addr);
1349 return 0;
1350}
1351
1352int
1353in_getpeeraddr(struct socket *so, struct sockaddr **nam)
1354{
1355 struct inpcb *inp;
1356 struct in_addr addr;
1357 in_port_t port;
1358
1359 inp = sotoinpcb(so);
1360 KASSERT(inp != NULL, ("in_getpeeraddr: inp == NULL"));
1361
1362 INP_RLOCK(inp);
1363 port = inp->inp_fport;
1364 addr = inp->inp_faddr;
1365 INP_RUNLOCK(inp);
1366
1367 *nam = in_sockaddr(port, &addr);
1368 return 0;
1369}
1370
1371void
1372in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, int errno,
1373 struct inpcb *(*notify)(struct inpcb *, int))
1374{
1375 struct inpcb *inp, *inp_temp;
1376
1377 INP_INFO_WLOCK(pcbinfo);
1378 LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, inp_temp) {
1379 INP_WLOCK(inp);
1380#ifdef INET6
1381 if ((inp->inp_vflag & INP_IPV4) == 0) {
1382 INP_WUNLOCK(inp);
1383 continue;
1384 }
1385#endif
1386 if (inp->inp_faddr.s_addr != faddr.s_addr ||
1387 inp->inp_socket == NULL) {
1388 INP_WUNLOCK(inp);
1389 continue;
1390 }
1391 if ((*notify)(inp, errno))
1392 INP_WUNLOCK(inp);
1393 }
1394 INP_INFO_WUNLOCK(pcbinfo);
1395}
1396
1397void
1398in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
1399{
1400 struct inpcb *inp;
1401 struct ip_moptions *imo;
1402 int i, gap;
1403
1404 INP_INFO_RLOCK(pcbinfo);
1405 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1406 INP_WLOCK(inp);
1407 imo = inp->inp_moptions;
1408 if ((inp->inp_vflag & INP_IPV4) &&
1409 imo != NULL) {
1410 /*
1411 * Unselect the outgoing interface if it is being
1412 * detached.
1413 */
1414 if (imo->imo_multicast_ifp == ifp)
1415 imo->imo_multicast_ifp = NULL;
1416
1417 /*
1418 * Drop multicast group membership if we joined
1419 * through the interface being detached.
1420 */
1421 for (i = 0, gap = 0; i < imo->imo_num_memberships;
1422 i++) {
1423 if (imo->imo_membership[i]->inm_ifp == ifp) {
1424 in_delmulti(imo->imo_membership[i]);
1425 gap++;
1426 } else if (gap != 0)
1427 imo->imo_membership[i - gap] =
1428 imo->imo_membership[i];
1429 }
1430 imo->imo_num_memberships -= gap;
1431 }
1432 INP_WUNLOCK(inp);
1433 }
1434 INP_INFO_RUNLOCK(pcbinfo);
1435}
1436
1437/*
1438 * Lookup a PCB based on the local address and port. Caller must hold the
1439 * hash lock. No inpcb locks or references are acquired.
1440 */
1441#define INP_LOOKUP_MAPPED_PCB_COST 3
1442struct inpcb *
1443in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
1444 u_short lport, int lookupflags, struct ucred *cred)
1445{
1446 struct inpcb *inp;
1447#ifdef INET6
1448 int matchwild = 3 + INP_LOOKUP_MAPPED_PCB_COST;
1449#else
1450 int matchwild = 3;
1451#endif
1452 int wildcard;
1453
1454 KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0,
1455 ("%s: invalid lookup flags %d", __func__, lookupflags));
1456
1457 INP_HASH_LOCK_ASSERT(pcbinfo);
1458
1459 if ((lookupflags & INPLOOKUP_WILDCARD) == 0) {
1460 struct inpcbhead *head;
1461 /*
1462 * Look for an unconnected (wildcard foreign addr) PCB that
1463 * matches the local address and port we're looking for.
1464 */
1465 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
1466 0, pcbinfo->ipi_hashmask)];
1467 LIST_FOREACH(inp, head, inp_hash) {
1468#ifdef INET6
1469 /* XXX inp locking */
1470 if ((inp->inp_vflag & INP_IPV4) == 0)
1471 continue;
1472#endif
1473 if (inp->inp_faddr.s_addr == INADDR_ANY &&
1474 inp->inp_laddr.s_addr == laddr.s_addr &&
1475 inp->inp_lport == lport) {
1476 /*
1477 * Found?
1478 */
1479 if (cred == NULL ||
1480 prison_equal_ip4(cred->cr_prison,
1481 inp->inp_cred->cr_prison))
1482 return (inp);
1483 }
1484 }
1485 /*
1486 * Not found.
1487 */
1488 return (NULL);
1489 } else {
1490 struct inpcbporthead *porthash;
1491 struct inpcbport *phd;
1492 struct inpcb *match = NULL;
1493 /*
1494 * Best fit PCB lookup.
1495 *
1496 * First see if this local port is in use by looking on the
1497 * port hash list.
1498 */
1499 porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
1500 pcbinfo->ipi_porthashmask)];
1501 LIST_FOREACH(phd, porthash, phd_hash) {
1502 if (phd->phd_port == lport)
1503 break;
1504 }
1505 if (phd != NULL) {
1506 /*
1507 * Port is in use by one or more PCBs. Look for best
1508 * fit.
1509 */
1510 LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
1511 wildcard = 0;
1512 if (cred != NULL &&
1513 !prison_equal_ip4(inp->inp_cred->cr_prison,
1514 cred->cr_prison))
1515 continue;
1516#ifdef INET6
1517 /* XXX inp locking */
1518 if ((inp->inp_vflag & INP_IPV4) == 0)
1519 continue;
1520 /*
1521 * We never select the PCB that has
1522 * INP_IPV6 flag and is bound to :: if
1523 * we have another PCB which is bound
1524 * to 0.0.0.0. If a PCB has the
1525 * INP_IPV6 flag, then we set its cost
1526 * higher than IPv4 only PCBs.
1527 *
1528 * Note that the case only happens
1529 * when a socket is bound to ::, under
1530 * the condition that the use of the
1531 * mapped address is allowed.
1532 */
1533 if ((inp->inp_vflag & INP_IPV6) != 0)
1534 wildcard += INP_LOOKUP_MAPPED_PCB_COST;
1535#endif
1536 if (inp->inp_faddr.s_addr != INADDR_ANY)
1537 wildcard++;
1538 if (inp->inp_laddr.s_addr != INADDR_ANY) {
1539 if (laddr.s_addr == INADDR_ANY)
1540 wildcard++;
1541 else if (inp->inp_laddr.s_addr != laddr.s_addr)
1542 continue;
1543 } else {
1544 if (laddr.s_addr != INADDR_ANY)
1545 wildcard++;
1546 }
1547 if (wildcard < matchwild) {
1548 match = inp;
1549 matchwild = wildcard;
1550 if (matchwild == 0)
1551 break;
1552 }
1553 }
1554 }
1555 return (match);
1556 }
1557}
1558#undef INP_LOOKUP_MAPPED_PCB_COST
1559
1560#ifdef PCBGROUP
1561/*
1562 * Lookup PCB in hash list, using pcbgroup tables.
1563 */
1564static struct inpcb *
1565in_pcblookup_group(struct inpcbinfo *pcbinfo, struct inpcbgroup *pcbgroup,
1566 struct in_addr faddr, u_int fport_arg, struct in_addr laddr,
1567 u_int lport_arg, int lookupflags, struct ifnet *ifp)
1568{
1569 struct inpcbhead *head;
1570 struct inpcb *inp, *tmpinp;
1571 u_short fport = fport_arg, lport = lport_arg;
1572
1573 /*
1574 * First look for an exact match.
1575 */
1576 tmpinp = NULL;
1577 INP_GROUP_LOCK(pcbgroup);
1578 head = &pcbgroup->ipg_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
1579 pcbgroup->ipg_hashmask)];
1580 LIST_FOREACH(inp, head, inp_pcbgrouphash) {
1581#ifdef INET6
1582 /* XXX inp locking */
1583 if ((inp->inp_vflag & INP_IPV4) == 0)
1584 continue;
1585#endif
1586 if (inp->inp_faddr.s_addr == faddr.s_addr &&
1587 inp->inp_laddr.s_addr == laddr.s_addr &&
1588 inp->inp_fport == fport &&
1589 inp->inp_lport == lport) {
1590 /*
1591 * XXX We should be able to directly return
1592 * the inp here, without any checks.
1593 * Well unless both bound with SO_REUSEPORT?
1594 */
1595 if (prison_flag(inp->inp_cred, PR_IP4))
1596 goto found;
1597 if (tmpinp == NULL)
1598 tmpinp = inp;
1599 }
1600 }
1601 if (tmpinp != NULL) {
1602 inp = tmpinp;
1603 goto found;
1604 }
1605
1606#ifdef RSS
1607 /*
1608 * For incoming connections, we may wish to do a wildcard
1609 * match for an RSS-local socket.
1610 */
1611 if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1612 struct inpcb *local_wild = NULL, *local_exact = NULL;
1613#ifdef INET6
1614 struct inpcb *local_wild_mapped = NULL;
1615#endif
1616 struct inpcb *jail_wild = NULL;
1617 struct inpcbhead *head;
1618 int injail;
1619
1620 /*
1621 * Order of socket selection - we always prefer jails.
1622 * 1. jailed, non-wild.
1623 * 2. jailed, wild.
1624 * 3. non-jailed, non-wild.
1625 * 4. non-jailed, wild.
1626 */
1627
1628 head = &pcbgroup->ipg_hashbase[INP_PCBHASH(INADDR_ANY,
1629 lport, 0, pcbgroup->ipg_hashmask)];
1630 LIST_FOREACH(inp, head, inp_pcbgrouphash) {
1631#ifdef INET6
1632 /* XXX inp locking */
1633 if ((inp->inp_vflag & INP_IPV4) == 0)
1634 continue;
1635#endif
1636 if (inp->inp_faddr.s_addr != INADDR_ANY ||
1637 inp->inp_lport != lport)
1638 continue;
1639
1640 /* XXX inp locking */
1641 if (ifp && ifp->if_type == IFT_FAITH &&
1642 (inp->inp_flags & INP_FAITH) == 0)
1643 continue;
1644
1645 injail = prison_flag(inp->inp_cred, PR_IP4);
1646 if (injail) {
1647 if (prison_check_ip4(inp->inp_cred,
1648 &laddr) != 0)
1649 continue;
1650 } else {
1651 if (local_exact != NULL)
1652 continue;
1653 }
1654
1655 if (inp->inp_laddr.s_addr == laddr.s_addr) {
1656 if (injail)
1657 goto found;
1658 else
1659 local_exact = inp;
1660 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1661#ifdef INET6
1662 /* XXX inp locking, NULL check */
1663 if (inp->inp_vflag & INP_IPV6PROTO)
1664 local_wild_mapped = inp;
1665 else
1666#endif
1667 if (injail)
1668 jail_wild = inp;
1669 else
1670 local_wild = inp;
1671 }
1672 } /* LIST_FOREACH */
1673
1674 inp = jail_wild;
1675 if (inp == NULL)
1676 inp = local_exact;
1677 if (inp == NULL)
1678 inp = local_wild;
1679#ifdef INET6
1680 if (inp == NULL)
1681 inp = local_wild_mapped;
1682#endif
1683 if (inp != NULL)
1684 goto found;
1685 }
1686#endif
1687
1688 /*
1689 * Then look for a wildcard match, if requested.
1690 */
1691 if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1692 struct inpcb *local_wild = NULL, *local_exact = NULL;
1693#ifdef INET6
1694 struct inpcb *local_wild_mapped = NULL;
1695#endif
1696 struct inpcb *jail_wild = NULL;
1697 struct inpcbhead *head;
1698 int injail;
1699
1700 /*
1701 * Order of socket selection - we always prefer jails.
1702 * 1. jailed, non-wild.
1703 * 2. jailed, wild.
1704 * 3. non-jailed, non-wild.
1705 * 4. non-jailed, wild.
1706 */
1707 head = &pcbinfo->ipi_wildbase[INP_PCBHASH(INADDR_ANY, lport,
1708 0, pcbinfo->ipi_wildmask)];
1709 LIST_FOREACH(inp, head, inp_pcbgroup_wild) {
1710#ifdef INET6
1711 /* XXX inp locking */
1712 if ((inp->inp_vflag & INP_IPV4) == 0)
1713 continue;
1714#endif
1715 if (inp->inp_faddr.s_addr != INADDR_ANY ||
1716 inp->inp_lport != lport)
1717 continue;
1718
1719 /* XXX inp locking */
1720 if (ifp && ifp->if_type == IFT_FAITH &&
1721 (inp->inp_flags & INP_FAITH) == 0)
1722 continue;
1723
1724 injail = prison_flag(inp->inp_cred, PR_IP4);
1725 if (injail) {
1726 if (prison_check_ip4(inp->inp_cred,
1727 &laddr) != 0)
1728 continue;
1729 } else {
1730 if (local_exact != NULL)
1731 continue;
1732 }
1733
1734 if (inp->inp_laddr.s_addr == laddr.s_addr) {
1735 if (injail)
1736 goto found;
1737 else
1738 local_exact = inp;
1739 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1740#ifdef INET6
1741 /* XXX inp locking, NULL check */
1742 if (inp->inp_vflag & INP_IPV6PROTO)
1743 local_wild_mapped = inp;
1744 else
1745#endif
1746 if (injail)
1747 jail_wild = inp;
1748 else
1749 local_wild = inp;
1750 }
1751 } /* LIST_FOREACH */
1752 inp = jail_wild;
1753 if (inp == NULL)
1754 inp = local_exact;
1755 if (inp == NULL)
1756 inp = local_wild;
1757#ifdef INET6
1758 if (inp == NULL)
1759 inp = local_wild_mapped;
1760#endif
1761 if (inp != NULL)
1762 goto found;
1763 } /* if (lookupflags & INPLOOKUP_WILDCARD) */
1764 INP_GROUP_UNLOCK(pcbgroup);
1765 return (NULL);
1766
1767found:
1768 in_pcbref(inp);
1769 INP_GROUP_UNLOCK(pcbgroup);
1770 if (lookupflags & INPLOOKUP_WLOCKPCB) {
1771 INP_WLOCK(inp);
1772 if (in_pcbrele_wlocked(inp))
1773 return (NULL);
1774 } else if (lookupflags & INPLOOKUP_RLOCKPCB) {
1775 INP_RLOCK(inp);
1776 if (in_pcbrele_rlocked(inp))
1777 return (NULL);
1778 } else
1779 panic("%s: locking bug", __func__);
1780 return (inp);
1781}
1782#endif /* PCBGROUP */
1783
1784/*
1785 * Lookup PCB in hash list, using pcbinfo tables. This variation assumes
1786 * that the caller has locked the hash list, and will not perform any further
1787 * locking or reference operations on either the hash list or the connection.
1788 */
1789static struct inpcb *
1790in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1791 u_int fport_arg, struct in_addr laddr, u_int lport_arg, int lookupflags,
1792 struct ifnet *ifp)
1793{
1794 struct inpcbhead *head;
1795 struct inpcb *inp, *tmpinp;
1796 u_short fport = fport_arg, lport = lport_arg;
1797
1798 KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0,
1799 ("%s: invalid lookup flags %d", __func__, lookupflags));
1800
1801 INP_HASH_LOCK_ASSERT(pcbinfo);
1802
1803 /*
1804 * First look for an exact match.
1805 */
1806 tmpinp = NULL;
1807 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
1808 pcbinfo->ipi_hashmask)];
1809 LIST_FOREACH(inp, head, inp_hash) {
1810#ifdef INET6
1811 /* XXX inp locking */
1812 if ((inp->inp_vflag & INP_IPV4) == 0)
1813 continue;
1814#endif
1815 if (inp->inp_faddr.s_addr == faddr.s_addr &&
1816 inp->inp_laddr.s_addr == laddr.s_addr &&
1817 inp->inp_fport == fport &&
1818 inp->inp_lport == lport) {
1819 /*
1820 * XXX We should be able to directly return
1821 * the inp here, without any checks.
1822 * Well unless both bound with SO_REUSEPORT?
1823 */
1824 if (prison_flag(inp->inp_cred, PR_IP4))
1825 return (inp);
1826 if (tmpinp == NULL)
1827 tmpinp = inp;
1828 }
1829 }
1830 if (tmpinp != NULL)
1831 return (tmpinp);
1832
1833 /*
1834 * Then look for a wildcard match, if requested.
1835 */
1836 if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1837 struct inpcb *local_wild = NULL, *local_exact = NULL;
1838#ifdef INET6
1839 struct inpcb *local_wild_mapped = NULL;
1840#endif
1841 struct inpcb *jail_wild = NULL;
1842 int injail;
1843
1844 /*
1845 * Order of socket selection - we always prefer jails.
1846 * 1. jailed, non-wild.
1847 * 2. jailed, wild.
1848 * 3. non-jailed, non-wild.
1849 * 4. non-jailed, wild.
1850 */
1851
1852 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
1853 0, pcbinfo->ipi_hashmask)];
1854 LIST_FOREACH(inp, head, inp_hash) {
1855#ifdef INET6
1856 /* XXX inp locking */
1857 if ((inp->inp_vflag & INP_IPV4) == 0)
1858 continue;
1859#endif
1860 if (inp->inp_faddr.s_addr != INADDR_ANY ||
1861 inp->inp_lport != lport)
1862 continue;
1863
1864 /* XXX inp locking */
1865 if (ifp && ifp->if_type == IFT_FAITH &&
1866 (inp->inp_flags & INP_FAITH) == 0)
1867 continue;
1868
1869 injail = prison_flag(inp->inp_cred, PR_IP4);
1870 if (injail) {
1871 if (prison_check_ip4(inp->inp_cred,
1872 &laddr) != 0)
1873 continue;
1874 } else {
1875 if (local_exact != NULL)
1876 continue;
1877 }
1878
1879 if (inp->inp_laddr.s_addr == laddr.s_addr) {
1880 if (injail)
1881 return (inp);
1882 else
1883 local_exact = inp;
1884 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1885#ifdef INET6
1886 /* XXX inp locking, NULL check */
1887 if (inp->inp_vflag & INP_IPV6PROTO)
1888 local_wild_mapped = inp;
1889 else
1890#endif
1891 if (injail)
1892 jail_wild = inp;
1893 else
1894 local_wild = inp;
1895 }
1896 } /* LIST_FOREACH */
1897 if (jail_wild != NULL)
1898 return (jail_wild);
1899 if (local_exact != NULL)
1900 return (local_exact);
1901 if (local_wild != NULL)
1902 return (local_wild);
1903#ifdef INET6
1904 if (local_wild_mapped != NULL)
1905 return (local_wild_mapped);
1906#endif
1907 } /* if ((lookupflags & INPLOOKUP_WILDCARD) != 0) */
1908
1909 return (NULL);
1910}
1911
1912/*
1913 * Lookup PCB in hash list, using pcbinfo tables. This variation locks the
1914 * hash list lock, and will return the inpcb locked (i.e., requires
1915 * INPLOOKUP_LOCKPCB).
1916 */
1917static struct inpcb *
1918in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1919 u_int fport, struct in_addr laddr, u_int lport, int lookupflags,
1920 struct ifnet *ifp)
1921{
1922 struct inpcb *inp;
1923
1924 INP_HASH_RLOCK(pcbinfo);
1925 inp = in_pcblookup_hash_locked(pcbinfo, faddr, fport, laddr, lport,
1926 (lookupflags & ~(INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)), ifp);
1927 if (inp != NULL) {
1928 in_pcbref(inp);
1929 INP_HASH_RUNLOCK(pcbinfo);
1930 if (lookupflags & INPLOOKUP_WLOCKPCB) {
1931 INP_WLOCK(inp);
1932 if (in_pcbrele_wlocked(inp))
1933 return (NULL);
1934 } else if (lookupflags & INPLOOKUP_RLOCKPCB) {
1935 INP_RLOCK(inp);
1936 if (in_pcbrele_rlocked(inp))
1937 return (NULL);
1938 } else
1939 panic("%s: locking bug", __func__);
1940 } else
1941 INP_HASH_RUNLOCK(pcbinfo);
1942 return (inp);
1943}
1944
1945/*
1946 * Public inpcb lookup routines, accepting a 4-tuple, and optionally, an mbuf
1947 * from which a pre-calculated hash value may be extracted.
1948 *
1949 * Possibly more of this logic should be in in_pcbgroup.c.
1950 */
1951struct inpcb *
1952in_pcblookup(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport,
1953 struct in_addr laddr, u_int lport, int lookupflags, struct ifnet *ifp)
1954{
1955#if defined(PCBGROUP) && !defined(RSS)
1956 struct inpcbgroup *pcbgroup;
1957#endif
1958
1959 KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0,
1960 ("%s: invalid lookup flags %d", __func__, lookupflags));
1961 KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0,
1962 ("%s: LOCKPCB not set", __func__));
1963
1964 /*
1965 * When not using RSS, use connection groups in preference to the
1966 * reservation table when looking up 4-tuples. When using RSS, just
1967 * use the reservation table, due to the cost of the Toeplitz hash
1968 * in software.
1969 *
1970 * XXXRW: This policy belongs in the pcbgroup code, as in principle
1971 * we could be doing RSS with a non-Toeplitz hash that is affordable
1972 * in software.
1973 */
1974#if defined(PCBGROUP) && !defined(RSS)
1975 if (in_pcbgroup_enabled(pcbinfo)) {
1976 pcbgroup = in_pcbgroup_bytuple(pcbinfo, laddr, lport, faddr,
1977 fport);
1978 return (in_pcblookup_group(pcbinfo, pcbgroup, faddr, fport,
1979 laddr, lport, lookupflags, ifp));
1980 }
1981#endif
1982 return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport,
1983 lookupflags, ifp));
1984}
1985
1986struct inpcb *
1987in_pcblookup_mbuf(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1988 u_int fport, struct in_addr laddr, u_int lport, int lookupflags,
1989 struct ifnet *ifp, struct mbuf *m)
1990{
1991#ifdef PCBGROUP
1992 struct inpcbgroup *pcbgroup;
1993#endif
1994
1995 KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0,
1996 ("%s: invalid lookup flags %d", __func__, lookupflags));
1997 KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0,
1998 ("%s: LOCKPCB not set", __func__));
1999
2000#ifdef PCBGROUP
2001 /*
2002 * If we can use a hardware-generated hash to look up the connection
2003 * group, use that connection group to find the inpcb. Otherwise
2004 * fall back on a software hash -- or the reservation table if we're
2005 * using RSS.
2006 *
2007 * XXXRW: As above, that policy belongs in the pcbgroup code.
2008 */
2009 if (in_pcbgroup_enabled(pcbinfo) &&
2010 !(M_HASHTYPE_TEST(m, M_HASHTYPE_NONE))) {
2011 pcbgroup = in_pcbgroup_byhash(pcbinfo, M_HASHTYPE_GET(m),
2012 m->m_pkthdr.flowid);
2013 if (pcbgroup != NULL)
2014 return (in_pcblookup_group(pcbinfo, pcbgroup, faddr,
2015 fport, laddr, lport, lookupflags, ifp));
2016#ifndef RSS
2017 pcbgroup = in_pcbgroup_bytuple(pcbinfo, laddr, lport, faddr,
2018 fport);
2019 return (in_pcblookup_group(pcbinfo, pcbgroup, faddr, fport,
2020 laddr, lport, lookupflags, ifp));
2021#endif
2022 }
2023#endif
2024 return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport,
2025 lookupflags, ifp));
2026}
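/*
 * Example (illustrative sketch, not part of the original source): a
 * protocol input path typically resolves the 4-tuple of a received
 * packet to a locked inpcb, allowing wildcard matches so that listening
 * sockets are found as well.  This loosely mirrors how tcp_input() and
 * udp_input() use the interface; details vary by protocol.
 *
 *	struct inpcb *inp;
 *
 *	inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
 *	    ip->ip_dst, th->th_dport,
 *	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, ifp, m);
 *	if (inp == NULL)
 *		return;			(no matching connection or listener)
 *	(... process the segment ...)
 *	INP_RUNLOCK(inp);
 */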
2027#endif /* INET */
2028
2029/*
2030 * Insert PCB onto various hash lists.
2031 */
2032static int
2033in_pcbinshash_internal(struct inpcb *inp, int do_pcbgroup_update)
2034{
2035 struct inpcbhead *pcbhash;
2036 struct inpcbporthead *pcbporthash;
2037 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2038 struct inpcbport *phd;
2039 u_int32_t hashkey_faddr;
2040
2041 INP_WLOCK_ASSERT(inp);
2042 INP_HASH_WLOCK_ASSERT(pcbinfo);
2043
2044 KASSERT((inp->inp_flags & INP_INHASHLIST) == 0,
2045 ("in_pcbinshash: INP_INHASHLIST"));
2046
2047#ifdef INET6
2048 if (inp->inp_vflag & INP_IPV6)
40
41#include "opt_ddb.h"
42#include "opt_ipsec.h"
43#include "opt_inet.h"
44#include "opt_inet6.h"
45#include "opt_pcbgroup.h"
46#include "opt_rss.h"
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/malloc.h>
51#include <sys/mbuf.h>
52#include <sys/callout.h>
53#include <sys/domain.h>
54#include <sys/protosw.h>
55#include <sys/socket.h>
56#include <sys/socketvar.h>
57#include <sys/priv.h>
58#include <sys/proc.h>
59#include <sys/refcount.h>
60#include <sys/jail.h>
61#include <sys/kernel.h>
62#include <sys/sysctl.h>
63
64#ifdef DDB
65#include <ddb/ddb.h>
66#endif
67
68#include <vm/uma.h>
69
70#include <net/if.h>
71#include <net/if_var.h>
72#include <net/if_types.h>
73#include <net/route.h>
74#include <net/vnet.h>
75
76#if defined(INET) || defined(INET6)
77#include <netinet/in.h>
78#include <netinet/in_pcb.h>
79#include <netinet/in_rss.h>
80#include <netinet/ip_var.h>
81#include <netinet/tcp_var.h>
82#include <netinet/udp.h>
83#include <netinet/udp_var.h>
84#endif
85#ifdef INET
86#include <netinet/in_var.h>
87#endif
88#ifdef INET6
89#include <netinet/ip6.h>
90#include <netinet6/in6_pcb.h>
91#include <netinet6/in6_var.h>
92#include <netinet6/ip6_var.h>
93#endif /* INET6 */
94
95
96#ifdef IPSEC
97#include <netipsec/ipsec.h>
98#include <netipsec/key.h>
99#endif /* IPSEC */
100
101#include <security/mac/mac_framework.h>
102
103static struct callout ipport_tick_callout;
104
105/*
106 * These configure the range of local port addresses assigned to
107 * "unspecified" outgoing connections/packets/whatever.
108 */
109VNET_DEFINE(int, ipport_lowfirstauto) = IPPORT_RESERVED - 1; /* 1023 */
110VNET_DEFINE(int, ipport_lowlastauto) = IPPORT_RESERVEDSTART; /* 600 */
111VNET_DEFINE(int, ipport_firstauto) = IPPORT_EPHEMERALFIRST; /* 10000 */
112VNET_DEFINE(int, ipport_lastauto) = IPPORT_EPHEMERALLAST; /* 65535 */
113VNET_DEFINE(int, ipport_hifirstauto) = IPPORT_HIFIRSTAUTO; /* 49152 */
114VNET_DEFINE(int, ipport_hilastauto) = IPPORT_HILASTAUTO; /* 65535 */
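/*
 * These ranges are exported below as sysctls under net.inet.ip.portrange
 * (lowfirst, lowlast, first, last, hifirst, hilast).  For example, an
 * administrator could restrict ordinary ephemeral ports with (values
 * purely illustrative):
 *
 *	sysctl net.inet.ip.portrange.first=20000
 *	sysctl net.inet.ip.portrange.last=30000
 */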
115
116/*
117 * Reserved ports accessible only to root. There are significant
118 * security considerations that must be accounted for when changing these,
119 * but the security benefits can be great. Please be careful.
120 */
121VNET_DEFINE(int, ipport_reservedhigh) = IPPORT_RESERVED - 1; /* 1023 */
122VNET_DEFINE(int, ipport_reservedlow);
123
124/* Variables dealing with random ephemeral port allocation. */
125VNET_DEFINE(int, ipport_randomized) = 1; /* user controlled via sysctl */
126VNET_DEFINE(int, ipport_randomcps) = 10; /* user controlled via sysctl */
127VNET_DEFINE(int, ipport_randomtime) = 45; /* user controlled via sysctl */
128VNET_DEFINE(int, ipport_stoprandom); /* toggled by ipport_tick */
129VNET_DEFINE(int, ipport_tcpallocs);
130static VNET_DEFINE(int, ipport_tcplastcount);
131
132#define V_ipport_tcplastcount VNET(ipport_tcplastcount)
133
134static void in_pcbremlists(struct inpcb *inp);
135#ifdef INET
136static struct inpcb *in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo,
137 struct in_addr faddr, u_int fport_arg,
138 struct in_addr laddr, u_int lport_arg,
139 int lookupflags, struct ifnet *ifp);
140
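/* Clamp a port-range sysctl value into [min, max]. */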
141#define RANGECHK(var, min, max) \
142 if ((var) < (min)) { (var) = (min); } \
143 else if ((var) > (max)) { (var) = (max); }
144
145static int
146sysctl_net_ipport_check(SYSCTL_HANDLER_ARGS)
147{
148 int error;
149
150 error = sysctl_handle_int(oidp, arg1, arg2, req);
151 if (error == 0) {
152 RANGECHK(V_ipport_lowfirstauto, 1, IPPORT_RESERVED - 1);
153 RANGECHK(V_ipport_lowlastauto, 1, IPPORT_RESERVED - 1);
154 RANGECHK(V_ipport_firstauto, IPPORT_RESERVED, IPPORT_MAX);
155 RANGECHK(V_ipport_lastauto, IPPORT_RESERVED, IPPORT_MAX);
156 RANGECHK(V_ipport_hifirstauto, IPPORT_RESERVED, IPPORT_MAX);
157 RANGECHK(V_ipport_hilastauto, IPPORT_RESERVED, IPPORT_MAX);
158 }
159 return (error);
160}
161
162#undef RANGECHK
163
164static SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW, 0,
165 "IP Ports");
166
167SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst,
168 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowfirstauto), 0,
169 &sysctl_net_ipport_check, "I", "");
170SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast,
171 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lowlastauto), 0,
172 &sysctl_net_ipport_check, "I", "");
173SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, first,
174 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_firstauto), 0,
175 &sysctl_net_ipport_check, "I", "");
176SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, last,
177 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_lastauto), 0,
178 &sysctl_net_ipport_check, "I", "");
179SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst,
180 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hifirstauto), 0,
181 &sysctl_net_ipport_check, "I", "");
182SYSCTL_VNET_PROC(_net_inet_ip_portrange, OID_AUTO, hilast,
183 CTLTYPE_INT|CTLFLAG_RW, &VNET_NAME(ipport_hilastauto), 0,
184 &sysctl_net_ipport_check, "I", "");
185SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedhigh,
186 CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedhigh), 0, "");
187SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, reservedlow,
188 CTLFLAG_RW|CTLFLAG_SECURE, &VNET_NAME(ipport_reservedlow), 0, "");
189SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomized, CTLFLAG_RW,
190 &VNET_NAME(ipport_randomized), 0, "Enable random port allocation");
191SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomcps, CTLFLAG_RW,
192 &VNET_NAME(ipport_randomcps), 0, "Maximum number of random port "
193 "allocations before switching to a sequental one");
194SYSCTL_VNET_INT(_net_inet_ip_portrange, OID_AUTO, randomtime, CTLFLAG_RW,
195 &VNET_NAME(ipport_randomtime), 0,
196 "Minimum time to keep sequental port "
197 "allocation before switching to a random one");
198#endif /* INET */
199
200/*
201 * in_pcb.c: manage the Protocol Control Blocks.
202 *
203 * NOTE: It is assumed that most of these functions will be called with
204 * the pcbinfo lock held, and often, the inpcb lock held, as these utility
205 * functions often modify hash chains or addresses in pcbs.
206 */
207
208/*
209 * Initialize an inpcbinfo -- we should be able to reduce the number of
210 * arguments in time.
211 */
212void
213in_pcbinfo_init(struct inpcbinfo *pcbinfo, const char *name,
214 struct inpcbhead *listhead, int hash_nelements, int porthash_nelements,
215 char *inpcbzone_name, uma_init inpcbzone_init, uma_fini inpcbzone_fini,
216 uint32_t inpcbzone_flags, u_int hashfields)
217{
218
219 INP_INFO_LOCK_INIT(pcbinfo, name);
220 INP_HASH_LOCK_INIT(pcbinfo, "pcbinfohash"); /* XXXRW: argument? */
221#ifdef VIMAGE
222 pcbinfo->ipi_vnet = curvnet;
223#endif
224 pcbinfo->ipi_listhead = listhead;
225 LIST_INIT(pcbinfo->ipi_listhead);
226 pcbinfo->ipi_count = 0;
227 pcbinfo->ipi_hashbase = hashinit(hash_nelements, M_PCB,
228 &pcbinfo->ipi_hashmask);
229 pcbinfo->ipi_porthashbase = hashinit(porthash_nelements, M_PCB,
230 &pcbinfo->ipi_porthashmask);
231#ifdef PCBGROUP
232 in_pcbgroup_init(pcbinfo, hashfields, hash_nelements);
233#endif
234 pcbinfo->ipi_zone = uma_zcreate(inpcbzone_name, sizeof(struct inpcb),
235 NULL, NULL, inpcbzone_init, inpcbzone_fini, UMA_ALIGN_PTR,
236 inpcbzone_flags);
237 uma_zone_set_max(pcbinfo->ipi_zone, maxsockets);
238 uma_zone_set_warning(pcbinfo->ipi_zone,
239 "kern.ipc.maxsockets limit reached");
240}
241
242/*
243 * Destroy an inpcbinfo.
244 */
245void
246in_pcbinfo_destroy(struct inpcbinfo *pcbinfo)
247{
248
249 KASSERT(pcbinfo->ipi_count == 0,
250 ("%s: ipi_count = %u", __func__, pcbinfo->ipi_count));
251
252 hashdestroy(pcbinfo->ipi_hashbase, M_PCB, pcbinfo->ipi_hashmask);
253 hashdestroy(pcbinfo->ipi_porthashbase, M_PCB,
254 pcbinfo->ipi_porthashmask);
255#ifdef PCBGROUP
256 in_pcbgroup_destroy(pcbinfo);
257#endif
258 uma_zdestroy(pcbinfo->ipi_zone);
259 INP_HASH_LOCK_DESTROY(pcbinfo);
260 INP_INFO_LOCK_DESTROY(pcbinfo);
261}
262
263/*
264 * Allocate a PCB and associate it with the socket.
265 * On success return with the PCB locked.
266 */
267int
268in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo)
269{
270 struct inpcb *inp;
271 int error;
272
273 INP_INFO_WLOCK_ASSERT(pcbinfo);
274 error = 0;
275 inp = uma_zalloc(pcbinfo->ipi_zone, M_NOWAIT);
276 if (inp == NULL)
277 return (ENOBUFS);
278 bzero(inp, inp_zero_size);
279 inp->inp_pcbinfo = pcbinfo;
280 inp->inp_socket = so;
281 inp->inp_cred = crhold(so->so_cred);
282 inp->inp_inc.inc_fibnum = so->so_fibnum;
283#ifdef MAC
284 error = mac_inpcb_init(inp, M_NOWAIT);
285 if (error != 0)
286 goto out;
287 mac_inpcb_create(so, inp);
288#endif
289#ifdef IPSEC
290 error = ipsec_init_policy(so, &inp->inp_sp);
291 if (error != 0) {
292#ifdef MAC
293 mac_inpcb_destroy(inp);
294#endif
295 goto out;
296 }
297#endif /*IPSEC*/
298#ifdef INET6
299 if (INP_SOCKAF(so) == AF_INET6) {
300 inp->inp_vflag |= INP_IPV6PROTO;
301 if (V_ip6_v6only)
302 inp->inp_flags |= IN6P_IPV6_V6ONLY;
303 }
304#endif
305 LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list);
306 pcbinfo->ipi_count++;
307 so->so_pcb = (caddr_t)inp;
308#ifdef INET6
309 if (V_ip6_auto_flowlabel)
310 inp->inp_flags |= IN6P_AUTOFLOWLABEL;
311#endif
312 INP_WLOCK(inp);
313 inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
314 refcount_init(&inp->inp_refcount, 1); /* Reference from inpcbinfo */
315#if defined(IPSEC) || defined(MAC)
316out:
317 if (error != 0) {
318 crfree(inp->inp_cred);
319 uma_zfree(pcbinfo->ipi_zone, inp);
320 }
321#endif
322 return (error);
323}
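/*
 * Example (illustrative sketch, not part of the original source): a
 * protocol attach routine would typically wrap in_pcballoc() along these
 * lines -- the pcbinfo list lock is held across the call, and on success
 * the new inpcb is left write-locked and already attached to the socket.
 *
 *	INP_INFO_WLOCK(&V_udbinfo);
 *	error = in_pcballoc(so, &V_udbinfo);
 *	if (error) {
 *		INP_INFO_WUNLOCK(&V_udbinfo);
 *		return (error);
 *	}
 *	inp = sotoinpcb(so);
 *	(... protocol-specific initialization ...)
 *	INP_WUNLOCK(inp);
 *	INP_INFO_WUNLOCK(&V_udbinfo);
 */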
324
325#ifdef INET
326int
327in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
328{
329 int anonport, error;
330
331 INP_WLOCK_ASSERT(inp);
332 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
333
334 if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY)
335 return (EINVAL);
336 anonport = nam == NULL || ((struct sockaddr_in *)nam)->sin_port == 0;
337 error = in_pcbbind_setup(inp, nam, &inp->inp_laddr.s_addr,
338 &inp->inp_lport, cred);
339 if (error)
340 return (error);
341 if (in_pcbinshash(inp) != 0) {
342 inp->inp_laddr.s_addr = INADDR_ANY;
343 inp->inp_lport = 0;
344 return (EAGAIN);
345 }
346 if (anonport)
347 inp->inp_flags |= INP_ANONPORT;
348 return (0);
349}
350#endif
351
352/*
353 * Select a local port (number) to use.
354 */
355#if defined(INET) || defined(INET6)
356int
357in_pcb_lport(struct inpcb *inp, struct in_addr *laddrp, u_short *lportp,
358 struct ucred *cred, int lookupflags)
359{
360 struct inpcbinfo *pcbinfo;
361 struct inpcb *tmpinp;
362 unsigned short *lastport;
363 int count, dorandom, error;
364 u_short aux, first, last, lport;
365#ifdef INET
366 struct in_addr laddr;
367#endif
368
369 pcbinfo = inp->inp_pcbinfo;
370
371 /*
372 * Because no actual state changes occur here, a global write lock on
373 * the pcbinfo isn't required.
374 */
375 INP_LOCK_ASSERT(inp);
376 INP_HASH_LOCK_ASSERT(pcbinfo);
377
378 if (inp->inp_flags & INP_HIGHPORT) {
379 first = V_ipport_hifirstauto; /* sysctl */
380 last = V_ipport_hilastauto;
381 lastport = &pcbinfo->ipi_lasthi;
382 } else if (inp->inp_flags & INP_LOWPORT) {
383 error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0);
384 if (error)
385 return (error);
386 first = V_ipport_lowfirstauto; /* 1023 */
387 last = V_ipport_lowlastauto; /* 600 */
388 lastport = &pcbinfo->ipi_lastlow;
389 } else {
390 first = V_ipport_firstauto; /* sysctl */
391 last = V_ipport_lastauto;
392 lastport = &pcbinfo->ipi_lastport;
393 }
394 /*
395 * For UDP(-Lite), use random port allocation as long as the user
396 * allows it. For TCP (and as of yet unknown) connections,
397 * use random port allocation only if the user allows it AND
398 * ipport_tick() allows it.
399 */
400 if (V_ipport_randomized &&
401 (!V_ipport_stoprandom || pcbinfo == &V_udbinfo ||
402 pcbinfo == &V_ulitecbinfo))
403 dorandom = 1;
404 else
405 dorandom = 0;
406 /*
407 * It makes no sense to do random port allocation if
408 * we have only one port available.
409 */
410 if (first == last)
411 dorandom = 0;
412 /* Make sure to not include UDP(-Lite) packets in the count. */
413 if (pcbinfo != &V_udbinfo && pcbinfo != &V_ulitecbinfo)
414 V_ipport_tcpallocs++;
415 /*
416 * Instead of having two loops further down counting up or down
417 * make sure that first is always <= last and go with only one
418 * code path implementing all logic.
419 */
420 if (first > last) {
421 aux = first;
422 first = last;
423 last = aux;
424 }
425
426#ifdef INET
427 /* Make the compiler happy. */
428 laddr.s_addr = 0;
429 if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4) {
430 KASSERT(laddrp != NULL, ("%s: laddrp NULL for v4 inp %p",
431 __func__, inp));
432 laddr = *laddrp;
433 }
434#endif
435 tmpinp = NULL; /* Make compiler happy. */
436 lport = *lportp;
437
438 if (dorandom)
439 *lastport = first + (arc4random() % (last - first));
440
441 count = last - first;
442
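	/*
	 * Probe candidate ports sequentially from *lastport, wrapping
	 * around within [first, last], until a port with no conflicting
	 * local binding is found or the whole range has been exhausted.
	 */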
443 do {
444 if (count-- < 0) /* completely used? */
445 return (EADDRNOTAVAIL);
446 ++*lastport;
447 if (*lastport < first || *lastport > last)
448 *lastport = first;
449 lport = htons(*lastport);
450
451#ifdef INET6
452 if ((inp->inp_vflag & INP_IPV6) != 0)
453 tmpinp = in6_pcblookup_local(pcbinfo,
454 &inp->in6p_laddr, lport, lookupflags, cred);
455#endif
456#if defined(INET) && defined(INET6)
457 else
458#endif
459#ifdef INET
460 tmpinp = in_pcblookup_local(pcbinfo, laddr,
461 lport, lookupflags, cred);
462#endif
463 } while (tmpinp != NULL);
464
465#ifdef INET
466 if ((inp->inp_vflag & (INP_IPV4|INP_IPV6)) == INP_IPV4)
467 laddrp->s_addr = laddr.s_addr;
468#endif
469 *lportp = lport;
470
471 return (0);
472}
473
474/*
475 * Return cached socket options.
476 */
477short
478inp_so_options(const struct inpcb *inp)
479{
480 short so_options;
481
482 so_options = 0;
483
484 if ((inp->inp_flags2 & INP_REUSEPORT) != 0)
485 so_options |= SO_REUSEPORT;
486 if ((inp->inp_flags2 & INP_REUSEADDR) != 0)
487 so_options |= SO_REUSEADDR;
488 return (so_options);
489}
490#endif /* INET || INET6 */
491
492/*
493 * Check if a new BINDMULTI socket is allowed to be created.
494 *
495 * ni points to the new inp.
496 * oi points to the existing inp.
497 *
498 * This checks whether the existing inp also has BINDMULTI and
499 * whether the credentials match.
500 */
501int
502in_pcbbind_check_bindmulti(const struct inpcb *ni, const struct inpcb *oi)
503{
504 /* Check permissions match */
505 if ((ni->inp_flags2 & INP_BINDMULTI) &&
506 (ni->inp_cred->cr_uid !=
507 oi->inp_cred->cr_uid))
508 return (0);
509
510 /* Check the existing inp has BINDMULTI set */
511 if ((ni->inp_flags2 & INP_BINDMULTI) &&
512 ((oi->inp_flags2 & INP_BINDMULTI) == 0))
513 return (0);
514
515 /*
516 * We're okay - either INP_BINDMULTI isn't set on ni, or
517 * it is and it matches the checks.
518 */
519 return (1);
520}
521
522#ifdef INET
523/*
524 * Set up a bind operation on a PCB, performing port allocation
525 * as required, but do not actually modify the PCB. Callers can
526 * either complete the bind by setting inp_laddr/inp_lport and
527 * calling in_pcbinshash(), or they can just use the resulting
528 * port and address to authorise the sending of a once-off packet.
529 *
530 * On error, the values of *laddrp and *lportp are not changed.
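 *
 * (in_pcbbind() above shows the first usage: it completes the bind by
 * calling in_pcbinshash() on success.)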
531 */
532int
533in_pcbbind_setup(struct inpcb *inp, struct sockaddr *nam, in_addr_t *laddrp,
534 u_short *lportp, struct ucred *cred)
535{
536 struct socket *so = inp->inp_socket;
537 struct sockaddr_in *sin;
538 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
539 struct in_addr laddr;
540 u_short lport = 0;
541 int lookupflags = 0, reuseport = (so->so_options & SO_REUSEPORT);
542 int error;
543
544 /*
545 * No state changes, so read locks are sufficient here.
546 */
547 INP_LOCK_ASSERT(inp);
548 INP_HASH_LOCK_ASSERT(pcbinfo);
549
550 if (TAILQ_EMPTY(&V_in_ifaddrhead)) /* XXX broken! */
551 return (EADDRNOTAVAIL);
552 laddr.s_addr = *laddrp;
553 if (nam != NULL && laddr.s_addr != INADDR_ANY)
554 return (EINVAL);
555 if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
556 lookupflags = INPLOOKUP_WILDCARD;
557 if (nam == NULL) {
558 if ((error = prison_local_ip4(cred, &laddr)) != 0)
559 return (error);
560 } else {
561 sin = (struct sockaddr_in *)nam;
562 if (nam->sa_len != sizeof (*sin))
563 return (EINVAL);
564#ifdef notdef
565 /*
566 * We should check the family, but old programs
567 * incorrectly fail to initialize it.
568 */
569 if (sin->sin_family != AF_INET)
570 return (EAFNOSUPPORT);
571#endif
572 error = prison_local_ip4(cred, &sin->sin_addr);
573 if (error)
574 return (error);
575 if (sin->sin_port != *lportp) {
576 /* Don't allow the port to change. */
577 if (*lportp != 0)
578 return (EINVAL);
579 lport = sin->sin_port;
580 }
581 /* NB: lport is left as 0 if the port isn't being changed. */
582 if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
583 /*
584 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
585 * allow complete duplication of binding if
586 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
587 * and a multicast address is bound on both
588 * new and duplicated sockets.
589 */
590 if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) != 0)
591 reuseport = SO_REUSEADDR|SO_REUSEPORT;
592 } else if (sin->sin_addr.s_addr != INADDR_ANY) {
593 sin->sin_port = 0; /* yech... */
594 bzero(&sin->sin_zero, sizeof(sin->sin_zero));
595 /*
596 * Is the address a local IP address?
597 * If INP_BINDANY is set, then the socket may be bound
598 * to any endpoint address, local or not.
599 */
600 if ((inp->inp_flags & INP_BINDANY) == 0 &&
601 ifa_ifwithaddr_check((struct sockaddr *)sin) == 0)
602 return (EADDRNOTAVAIL);
603 }
604 laddr = sin->sin_addr;
605 if (lport) {
606 struct inpcb *t;
607 struct tcptw *tw;
608
609 /* GROSS */
610 if (ntohs(lport) <= V_ipport_reservedhigh &&
611 ntohs(lport) >= V_ipport_reservedlow &&
612 priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT,
613 0))
614 return (EACCES);
615 if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) &&
616 priv_check_cred(inp->inp_cred,
617 PRIV_NETINET_REUSEPORT, 0) != 0) {
618 t = in_pcblookup_local(pcbinfo, sin->sin_addr,
619 lport, INPLOOKUP_WILDCARD, cred);
620 /*
621 * XXX
622 * This entire block sorely needs a rewrite.
623 */
624 if (t &&
625 ((inp->inp_flags2 & INP_BINDMULTI) == 0) &&
626 ((t->inp_flags & INP_TIMEWAIT) == 0) &&
627 (so->so_type != SOCK_STREAM ||
628 ntohl(t->inp_faddr.s_addr) == INADDR_ANY) &&
629 (ntohl(sin->sin_addr.s_addr) != INADDR_ANY ||
630 ntohl(t->inp_laddr.s_addr) != INADDR_ANY ||
631 (t->inp_flags2 & INP_REUSEPORT) == 0) &&
632 (inp->inp_cred->cr_uid !=
633 t->inp_cred->cr_uid))
634 return (EADDRINUSE);
635
636 /*
637 * If the socket is a BINDMULTI socket, then
638 * the credentials need to match and the
639 * original socket also has to have been bound
640 * with BINDMULTI.
641 */
642 if (t && (! in_pcbbind_check_bindmulti(inp, t)))
643 return (EADDRINUSE);
644 }
645 t = in_pcblookup_local(pcbinfo, sin->sin_addr,
646 lport, lookupflags, cred);
647 if (t && (t->inp_flags & INP_TIMEWAIT)) {
648 /*
649 * XXXRW: If an inpcb has had its timewait
650 * state recycled, we treat the address as
651 * being in use (for now). This is better
652 * than a panic, but not desirable.
653 */
654 tw = intotw(t);
655 if (tw == NULL ||
656 (reuseport & tw->tw_so_options) == 0)
657 return (EADDRINUSE);
658 } else if (t &&
659 ((inp->inp_flags2 & INP_BINDMULTI) == 0) &&
660 (reuseport & inp_so_options(t)) == 0) {
661#ifdef INET6
662 if (ntohl(sin->sin_addr.s_addr) !=
663 INADDR_ANY ||
664 ntohl(t->inp_laddr.s_addr) !=
665 INADDR_ANY ||
666 (inp->inp_vflag & INP_IPV6PROTO) == 0 ||
667 (t->inp_vflag & INP_IPV6PROTO) == 0)
668#endif
669 return (EADDRINUSE);
670 if (t && (! in_pcbbind_check_bindmulti(inp, t)))
671 return (EADDRINUSE);
672 }
673 }
674 }
675 if (*lportp != 0)
676 lport = *lportp;
677 if (lport == 0) {
678 error = in_pcb_lport(inp, &laddr, &lport, cred, lookupflags);
679 if (error != 0)
680 return (error);
681
682 }
683 *laddrp = laddr.s_addr;
684 *lportp = lport;
685 return (0);
686}
687
688/*
689 * Connect from a socket to a specified address.
690 * Both address and port must be specified in argument sin.
691 * If we don't have a local address for this socket yet,
692 * then pick one.
693 */
694int
695in_pcbconnect_mbuf(struct inpcb *inp, struct sockaddr *nam,
696 struct ucred *cred, struct mbuf *m)
697{
698 u_short lport, fport;
699 in_addr_t laddr, faddr;
700 int anonport, error;
701
702 INP_WLOCK_ASSERT(inp);
703 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
704
705 lport = inp->inp_lport;
706 laddr = inp->inp_laddr.s_addr;
707 anonport = (lport == 0);
708 error = in_pcbconnect_setup(inp, nam, &laddr, &lport, &faddr, &fport,
709 NULL, cred);
710 if (error)
711 return (error);
712
713 /* Do the initial binding of the local address if required. */
714 if (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0) {
715 inp->inp_lport = lport;
716 inp->inp_laddr.s_addr = laddr;
717 if (in_pcbinshash(inp) != 0) {
718 inp->inp_laddr.s_addr = INADDR_ANY;
719 inp->inp_lport = 0;
720 return (EAGAIN);
721 }
722 }
723
724 /* Commit the remaining changes. */
725 inp->inp_lport = lport;
726 inp->inp_laddr.s_addr = laddr;
727 inp->inp_faddr.s_addr = faddr;
728 inp->inp_fport = fport;
729 in_pcbrehash_mbuf(inp, m);
730
731 if (anonport)
732 inp->inp_flags |= INP_ANONPORT;
733 return (0);
734}
735
736int
737in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct ucred *cred)
738{
739
740 return (in_pcbconnect_mbuf(inp, nam, cred, NULL));
741}
742
743/*
744 * Do proper source address selection on an unbound socket in case
745 * of connect. Take jails into account as well.
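 *
 * In outline: if a usable route to the destination leaves through a
 * non-loopback interface, the address of that interface is used; if the
 * route goes via a loopback interface, the interface owning the
 * destination address is consulted instead; and if no route is found,
 * a directly connected interface is tried.  For jailed sockets each case
 * further restricts the choice to addresses the jail may use, falling
 * back to the jail's 'default' address.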
746 */
747int
748in_pcbladdr(struct inpcb *inp, struct in_addr *faddr, struct in_addr *laddr,
749 struct ucred *cred)
750{
751 struct ifaddr *ifa;
752 struct sockaddr *sa;
753 struct sockaddr_in *sin;
754 struct route sro;
755 int error;
756
757 KASSERT(laddr != NULL, ("%s: laddr NULL", __func__));
758
759 /*
760 * Bypass source address selection and use the primary jail IP
761 * if requested.
762 */
763 if (cred != NULL && !prison_saddrsel_ip4(cred, laddr))
764 return (0);
765
766 error = 0;
767 bzero(&sro, sizeof(sro));
768
769 sin = (struct sockaddr_in *)&sro.ro_dst;
770 sin->sin_family = AF_INET;
771 sin->sin_len = sizeof(struct sockaddr_in);
772 sin->sin_addr.s_addr = faddr->s_addr;
773
774 /*
775 * If the route is known, our source address is taken from the
776 * outgoing interface; otherwise punt.
777 *
778 * Find out route to destination.
779 */
780 if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0)
781 in_rtalloc_ign(&sro, 0, inp->inp_inc.inc_fibnum);
782
783 /*
784 * If we found a route, use the address corresponding to
785 * the outgoing interface.
786 *
787 * Otherwise assume faddr is reachable on a directly connected
788 * network and try to find a corresponding interface to take
789 * the source address from.
790 */
791 if (sro.ro_rt == NULL || sro.ro_rt->rt_ifp == NULL) {
792 struct in_ifaddr *ia;
793 struct ifnet *ifp;
794
795 ia = ifatoia(ifa_ifwithdstaddr((struct sockaddr *)sin));
796 if (ia == NULL)
797 ia = ifatoia(ifa_ifwithnet((struct sockaddr *)sin, 0));
798 if (ia == NULL) {
799 error = ENETUNREACH;
800 goto done;
801 }
802
803 if (cred == NULL || !prison_flag(cred, PR_IP4)) {
804 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
805 ifa_free(&ia->ia_ifa);
806 goto done;
807 }
808
809 ifp = ia->ia_ifp;
810 ifa_free(&ia->ia_ifa);
811 ia = NULL;
812 IF_ADDR_RLOCK(ifp);
813 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
814
815 sa = ifa->ifa_addr;
816 if (sa->sa_family != AF_INET)
817 continue;
818 sin = (struct sockaddr_in *)sa;
819 if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
820 ia = (struct in_ifaddr *)ifa;
821 break;
822 }
823 }
824 if (ia != NULL) {
825 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
826 IF_ADDR_RUNLOCK(ifp);
827 goto done;
828 }
829 IF_ADDR_RUNLOCK(ifp);
830
831 /* 3. As a last resort return the 'default' jail address. */
832 error = prison_get_ip4(cred, laddr);
833 goto done;
834 }
835
836 /*
837 * If the outgoing interface on the route found is not
838 * a loopback interface, use the address from that interface.
839 * In the case of jails, do these three steps:
840 * 1. check if the interface address belongs to the jail. If so use it.
841 * 2. check if we have any address on the outgoing interface
842 * belonging to this jail. If so use it.
843 * 3. as a last resort return the 'default' jail address.
844 */
845 if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) {
846 struct in_ifaddr *ia;
847 struct ifnet *ifp;
848
849 /* If not jailed, use the default returned. */
850 if (cred == NULL || !prison_flag(cred, PR_IP4)) {
851 ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
852 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
853 goto done;
854 }
855
856 /* Jailed. */
857 /* 1. Check if the iface address belongs to the jail. */
858 sin = (struct sockaddr_in *)sro.ro_rt->rt_ifa->ifa_addr;
859 if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
860 ia = (struct in_ifaddr *)sro.ro_rt->rt_ifa;
861 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
862 goto done;
863 }
864
865 /*
866 * 2. Check if we have any address on the outgoing interface
867 * belonging to this jail.
868 */
869 ia = NULL;
870 ifp = sro.ro_rt->rt_ifp;
871 IF_ADDR_RLOCK(ifp);
872 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
873 sa = ifa->ifa_addr;
874 if (sa->sa_family != AF_INET)
875 continue;
876 sin = (struct sockaddr_in *)sa;
877 if (prison_check_ip4(cred, &sin->sin_addr) == 0) {
878 ia = (struct in_ifaddr *)ifa;
879 break;
880 }
881 }
882 if (ia != NULL) {
883 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
884 IF_ADDR_RUNLOCK(ifp);
885 goto done;
886 }
887 IF_ADDR_RUNLOCK(ifp);
888
889 /* 3. As a last resort return the 'default' jail address. */
890 error = prison_get_ip4(cred, laddr);
891 goto done;
892 }
893
894 /*
895 * The outgoing interface is marked with 'loopback net', so the route
896 * found points back to ourselves.
897 * Try to find the interface of the destination address and then
898 * take the address from there. That interface is not necessarily
899 * a loopback interface.
900 * In the case of jails, check that the address belongs to the jail,
901 * and if we cannot find one, fall back to the 'default' jail address.
902 */
903 if ((sro.ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) != 0) {
904 struct sockaddr_in sain;
905 struct in_ifaddr *ia;
906
907 bzero(&sain, sizeof(struct sockaddr_in));
908 sain.sin_family = AF_INET;
909 sain.sin_len = sizeof(struct sockaddr_in);
910 sain.sin_addr.s_addr = faddr->s_addr;
911
912 ia = ifatoia(ifa_ifwithdstaddr(sintosa(&sain)));
913 if (ia == NULL)
914 ia = ifatoia(ifa_ifwithnet(sintosa(&sain), 0));
915 if (ia == NULL)
916 ia = ifatoia(ifa_ifwithaddr(sintosa(&sain)));
917
918 if (cred == NULL || !prison_flag(cred, PR_IP4)) {
919 if (ia == NULL) {
920 error = ENETUNREACH;
921 goto done;
922 }
923 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
924 ifa_free(&ia->ia_ifa);
925 goto done;
926 }
927
928 /* Jailed. */
929 if (ia != NULL) {
930 struct ifnet *ifp;
931
932 ifp = ia->ia_ifp;
933 ifa_free(&ia->ia_ifa);
934 ia = NULL;
935 IF_ADDR_RLOCK(ifp);
936 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
937
938 sa = ifa->ifa_addr;
939 if (sa->sa_family != AF_INET)
940 continue;
941 sin = (struct sockaddr_in *)sa;
942 if (prison_check_ip4(cred,
943 &sin->sin_addr) == 0) {
944 ia = (struct in_ifaddr *)ifa;
945 break;
946 }
947 }
948 if (ia != NULL) {
949 laddr->s_addr = ia->ia_addr.sin_addr.s_addr;
950 IF_ADDR_RUNLOCK(ifp);
951 goto done;
952 }
953 IF_ADDR_RUNLOCK(ifp);
954 }
955
956 /* 3. As a last resort return the 'default' jail address. */
957 error = prison_get_ip4(cred, laddr);
958 goto done;
959 }
960
961done:
962 if (sro.ro_rt != NULL)
963 RTFREE(sro.ro_rt);
964 return (error);
965}
966
967/*
968 * Set up for a connect from a socket to the specified address.
969 * On entry, *laddrp and *lportp should contain the current local
970 * address and port for the PCB; these are updated to the values
971 * that should be placed in inp_laddr and inp_lport to complete
972 * the connect.
973 *
974 * On success, *faddrp and *fportp will be set to the remote address
975 * and port. These are not updated in the error case.
976 *
977 * If the operation fails because the connection already exists,
978 * *oinpp will be set to the PCB of that connection so that the
979 * caller can decide to override it. In all other cases, *oinpp
980 * is set to NULL.
981 */
982int
983in_pcbconnect_setup(struct inpcb *inp, struct sockaddr *nam,
984 in_addr_t *laddrp, u_short *lportp, in_addr_t *faddrp, u_short *fportp,
985 struct inpcb **oinpp, struct ucred *cred)
986{
987 struct sockaddr_in *sin = (struct sockaddr_in *)nam;
988 struct in_ifaddr *ia;
989 struct inpcb *oinp;
990 struct in_addr laddr, faddr;
991 u_short lport, fport;
992 int error;
993
994 /*
995 * Because a global state change doesn't actually occur here, a read
996 * lock is sufficient.
997 */
998 INP_LOCK_ASSERT(inp);
999 INP_HASH_LOCK_ASSERT(inp->inp_pcbinfo);
1000
1001 if (oinpp != NULL)
1002 *oinpp = NULL;
1003 if (nam->sa_len != sizeof (*sin))
1004 return (EINVAL);
1005 if (sin->sin_family != AF_INET)
1006 return (EAFNOSUPPORT);
1007 if (sin->sin_port == 0)
1008 return (EADDRNOTAVAIL);
1009 laddr.s_addr = *laddrp;
1010 lport = *lportp;
1011 faddr = sin->sin_addr;
1012 fport = sin->sin_port;
1013
1014 if (!TAILQ_EMPTY(&V_in_ifaddrhead)) {
1015 /*
1016 * If the destination address is INADDR_ANY,
1017 * use the primary local address.
1018 * If the supplied address is INADDR_BROADCAST,
1019 * and the primary interface supports broadcast,
1020 * choose the broadcast address for that interface.
1021 */
1022 if (faddr.s_addr == INADDR_ANY) {
1023 IN_IFADDR_RLOCK();
1024 faddr =
1025 IA_SIN(TAILQ_FIRST(&V_in_ifaddrhead))->sin_addr;
1026 IN_IFADDR_RUNLOCK();
1027 if (cred != NULL &&
1028 (error = prison_get_ip4(cred, &faddr)) != 0)
1029 return (error);
1030 } else if (faddr.s_addr == (u_long)INADDR_BROADCAST) {
1031 IN_IFADDR_RLOCK();
1032 if (TAILQ_FIRST(&V_in_ifaddrhead)->ia_ifp->if_flags &
1033 IFF_BROADCAST)
1034 faddr = satosin(&TAILQ_FIRST(
1035 &V_in_ifaddrhead)->ia_broadaddr)->sin_addr;
1036 IN_IFADDR_RUNLOCK();
1037 }
1038 }
1039 if (laddr.s_addr == INADDR_ANY) {
1040 error = in_pcbladdr(inp, &faddr, &laddr, cred);
1041 /*
1042 * If the destination address is multicast and an outgoing
1043 * interface has been set as a multicast option, prefer the
1044 * address of that interface as our source address.
1045 */
1046 if (IN_MULTICAST(ntohl(faddr.s_addr)) &&
1047 inp->inp_moptions != NULL) {
1048 struct ip_moptions *imo;
1049 struct ifnet *ifp;
1050
1051 imo = inp->inp_moptions;
1052 if (imo->imo_multicast_ifp != NULL) {
1053 ifp = imo->imo_multicast_ifp;
1054 IN_IFADDR_RLOCK();
1055 TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
1056 if ((ia->ia_ifp == ifp) &&
1057 (cred == NULL ||
1058 prison_check_ip4(cred,
1059 &ia->ia_addr.sin_addr) == 0))
1060 break;
1061 }
1062 if (ia == NULL)
1063 error = EADDRNOTAVAIL;
1064 else {
1065 laddr = ia->ia_addr.sin_addr;
1066 error = 0;
1067 }
1068 IN_IFADDR_RUNLOCK();
1069 }
1070 }
1071 if (error)
1072 return (error);
1073 }
1074 oinp = in_pcblookup_hash_locked(inp->inp_pcbinfo, faddr, fport,
1075 laddr, lport, 0, NULL);
1076 if (oinp != NULL) {
1077 if (oinpp != NULL)
1078 *oinpp = oinp;
1079 return (EADDRINUSE);
1080 }
1081 if (lport == 0) {
1082 error = in_pcbbind_setup(inp, NULL, &laddr.s_addr, &lport,
1083 cred);
1084 if (error)
1085 return (error);
1086 }
1087 *laddrp = laddr.s_addr;
1088 *lportp = lport;
1089 *faddrp = faddr.s_addr;
1090 *fportp = fport;
1091 return (0);
1092}
1093
1094void
1095in_pcbdisconnect(struct inpcb *inp)
1096{
1097
1098 INP_WLOCK_ASSERT(inp);
1099 INP_HASH_WLOCK_ASSERT(inp->inp_pcbinfo);
1100
1101 inp->inp_faddr.s_addr = INADDR_ANY;
1102 inp->inp_fport = 0;
1103 in_pcbrehash(inp);
1104}
1105#endif /* INET */
1106
1107/*
1108 * in_pcbdetach() is responsible for disassociating a socket from an inpcb.
1109 * For most protocols, this will be invoked immediately prior to calling
1110 * in_pcbfree(). However, with TCP the inpcb may significantly outlive the
1111 * socket, in which case in_pcbfree() is deferred.
1112 */
1113void
1114in_pcbdetach(struct inpcb *inp)
1115{
1116
1117 KASSERT(inp->inp_socket != NULL, ("%s: inp_socket == NULL", __func__));
1118
1119 inp->inp_socket->so_pcb = NULL;
1120 inp->inp_socket = NULL;
1121}
1122
1123/*
1124 * in_pcbref() bumps the reference count on an inpcb in order to maintain
1125 * stability of an inpcb pointer despite the inpcb lock being released. This
1126 * is used in TCP when the inpcbinfo lock needs to be acquired or upgraded,
1127 * but where the inpcb lock may already be held, or when acquiring a reference
1128 * via a pcbgroup.
1129 *
1130 * in_pcbref() should be used only to provide brief memory stability, and
1131 * must always be followed by a call to INP_WLOCK() and in_pcbrele() to
1132 * garbage collect the inpcb if it has been in_pcbfree()'d from another
1133 * context. Until in_pcbrele() has returned that the inpcb is still valid,
1134 * lock and rele are the *only* safe operations that may be performed on the
1135 * inpcb.
1136 *
1137 * While the inpcb will not be freed, releasing the inpcb lock means that the
1138 * connection's state may change, so the caller should be careful to
1139 * revalidate any cached state on reacquiring the lock. Drop the reference
1140 * using in_pcbrele().
1141 */
1142void
1143in_pcbref(struct inpcb *inp)
1144{
1145
1146 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1147
1148 refcount_acquire(&inp->inp_refcount);
1149}
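/*
 * Example (illustrative sketch, not part of the original source): the
 * reference/relock pattern described above, used when the inpcb lock
 * must be dropped temporarily, e.g. to acquire a pcbinfo lock first for
 * lock-ordering reasons.
 *
 *	in_pcbref(inp);
 *	INP_WUNLOCK(inp);
 *	INP_INFO_WLOCK(pcbinfo);
 *	INP_WLOCK(inp);
 *	if (in_pcbrele_wlocked(inp)) {
 *		(the inpcb was freed while unlocked; give up)
 *		INP_INFO_WUNLOCK(pcbinfo);
 *		return;
 *	}
 *	(inpcb still valid and write-locked; revalidate any cached state)
 */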
1150
1151/*
1152 * Drop a refcount on an inpcb elevated using in_pcbref(); because a call to
1153 * in_pcbfree() may have been made between in_pcbref() and in_pcbrele(), we
1154 * return a flag indicating whether or not the inpcb remains valid. If it is
1155 * valid, we return with the inpcb lock held.
1156 *
1157 * Notice that, unlike in_pcbref(), the inpcb lock must be held to drop a
1158 * reference on an inpcb. Historically more work was done here (actually, in
1159 * in_pcbfree_internal()) but it has been moved to in_pcbfree() to avoid the
1160 * need for the pcbinfo lock in in_pcbrele(). Deferring the free is entirely
1161 * about memory stability (and continued use of the write lock).
1162 */
1163int
1164in_pcbrele_rlocked(struct inpcb *inp)
1165{
1166 struct inpcbinfo *pcbinfo;
1167
1168 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1169
1170 INP_RLOCK_ASSERT(inp);
1171
1172 if (refcount_release(&inp->inp_refcount) == 0) {
1173 /*
1174 * If the inpcb has been freed, let the caller know, even if
1175 * this isn't the last reference.
1176 */
1177 if (inp->inp_flags2 & INP_FREED) {
1178 INP_RUNLOCK(inp);
1179 return (1);
1180 }
1181 return (0);
1182 }
1183
1184 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1185
1186 INP_RUNLOCK(inp);
1187 pcbinfo = inp->inp_pcbinfo;
1188 uma_zfree(pcbinfo->ipi_zone, inp);
1189 return (1);
1190}
1191
1192int
1193in_pcbrele_wlocked(struct inpcb *inp)
1194{
1195 struct inpcbinfo *pcbinfo;
1196
1197 KASSERT(inp->inp_refcount > 0, ("%s: refcount 0", __func__));
1198
1199 INP_WLOCK_ASSERT(inp);
1200
1201 if (refcount_release(&inp->inp_refcount) == 0)
1202 return (0);
1203
1204 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1205
1206 INP_WUNLOCK(inp);
1207 pcbinfo = inp->inp_pcbinfo;
1208 uma_zfree(pcbinfo->ipi_zone, inp);
1209 return (1);
1210}
1211
1212/*
1213 * Temporary wrapper.
1214 */
1215int
1216in_pcbrele(struct inpcb *inp)
1217{
1218
1219 return (in_pcbrele_wlocked(inp));
1220}
1221
1222/*
1223 * Unconditionally schedule an inpcb to be freed by decrementing its
1224 * reference count, which should occur only after the inpcb has been detached
1225 * from its socket. If another thread holds a temporary reference (acquired
1226 * using in_pcbref()) then the free is deferred until that reference is
1227 * released using in_pcbrele(), but the inpcb is still unlocked. Almost all
1228 * work, including removal from global lists, is done in this context, where
1229 * the pcbinfo lock is held.
1230 */
1231void
1232in_pcbfree(struct inpcb *inp)
1233{
1234 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
1235
1236 KASSERT(inp->inp_socket == NULL, ("%s: inp_socket != NULL", __func__));
1237
1238 INP_INFO_WLOCK_ASSERT(pcbinfo);
1239 INP_WLOCK_ASSERT(inp);
1240
1241 /* XXXRW: Do as much as possible here. */
1242#ifdef IPSEC
1243 if (inp->inp_sp != NULL)
1244 ipsec_delete_pcbpolicy(inp);
1245#endif
1246 inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
1247 in_pcbremlists(inp);
1248#ifdef INET6
1249 if (inp->inp_vflag & INP_IPV6PROTO) {
1250 ip6_freepcbopts(inp->in6p_outputopts);
1251 if (inp->in6p_moptions != NULL)
1252 ip6_freemoptions(inp->in6p_moptions);
1253 }
1254#endif
1255 if (inp->inp_options)
1256 (void)m_free(inp->inp_options);
1257#ifdef INET
1258 if (inp->inp_moptions != NULL)
1259 inp_freemoptions(inp->inp_moptions);
1260#endif
1261 inp->inp_vflag = 0;
1262 inp->inp_flags2 |= INP_FREED;
1263 crfree(inp->inp_cred);
1264#ifdef MAC
1265 mac_inpcb_destroy(inp);
1266#endif
1267 if (!in_pcbrele_wlocked(inp))
1268 INP_WUNLOCK(inp);
1269}
1270
1271/*
1272 * in_pcbdrop() removes an inpcb from hashed lists, releasing its address and
1273 * port reservation, and preventing it from being returned by inpcb lookups.
1274 *
1275 * It is used by TCP to mark an inpcb as unused and avoid future packet
1276 * delivery or event notification when a socket remains open but TCP has
1277 * closed. This might occur as a result of a shutdown()-initiated TCP close
1278 * or a RST on the wire, and allows the port binding to be reused while still
1279 * maintaining the invariant that so_pcb always points to a valid inpcb until
1280 * in_pcbdetach().
1281 *
1282 * XXXRW: Possibly in_pcbdrop() should also prevent future notifications by
1283 * in_pcbnotifyall() and in_pcbpurgeif0()?
1284 */
1285void
1286in_pcbdrop(struct inpcb *inp)
1287{
1288
1289 INP_WLOCK_ASSERT(inp);
1290
1291 /*
1292 * XXXRW: Possibly we should protect the setting of INP_DROPPED with
1293 * the hash lock...?
1294 */
1295 inp->inp_flags |= INP_DROPPED;
1296 if (inp->inp_flags & INP_INHASHLIST) {
1297 struct inpcbport *phd = inp->inp_phd;
1298
1299 INP_HASH_WLOCK(inp->inp_pcbinfo);
1300 LIST_REMOVE(inp, inp_hash);
1301 LIST_REMOVE(inp, inp_portlist);
1302 if (LIST_FIRST(&phd->phd_pcblist) == NULL) {
1303 LIST_REMOVE(phd, phd_hash);
1304 free(phd, M_PCB);
1305 }
1306 INP_HASH_WUNLOCK(inp->inp_pcbinfo);
1307 inp->inp_flags &= ~INP_INHASHLIST;
1308#ifdef PCBGROUP
1309 in_pcbgroup_remove(inp);
1310#endif
1311 }
1312}
1313
1314#ifdef INET
1315/*
1316 * Common routines to return the socket addresses associated with inpcbs.
1317 */
1318struct sockaddr *
1319in_sockaddr(in_port_t port, struct in_addr *addr_p)
1320{
1321 struct sockaddr_in *sin;
1322
1323 sin = malloc(sizeof *sin, M_SONAME,
1324 M_WAITOK | M_ZERO);
1325 sin->sin_family = AF_INET;
1326 sin->sin_len = sizeof(*sin);
1327 sin->sin_addr = *addr_p;
1328 sin->sin_port = port;
1329
1330 return (struct sockaddr *)sin;
1331}
1332
1333int
1334in_getsockaddr(struct socket *so, struct sockaddr **nam)
1335{
1336 struct inpcb *inp;
1337 struct in_addr addr;
1338 in_port_t port;
1339
1340 inp = sotoinpcb(so);
1341 KASSERT(inp != NULL, ("in_getsockaddr: inp == NULL"));
1342
1343 INP_RLOCK(inp);
1344 port = inp->inp_lport;
1345 addr = inp->inp_laddr;
1346 INP_RUNLOCK(inp);
1347
1348 *nam = in_sockaddr(port, &addr);
1349 return 0;
1350}
1351
1352int
1353in_getpeeraddr(struct socket *so, struct sockaddr **nam)
1354{
1355 struct inpcb *inp;
1356 struct in_addr addr;
1357 in_port_t port;
1358
1359 inp = sotoinpcb(so);
1360 KASSERT(inp != NULL, ("in_getpeeraddr: inp == NULL"));
1361
1362 INP_RLOCK(inp);
1363 port = inp->inp_fport;
1364 addr = inp->inp_faddr;
1365 INP_RUNLOCK(inp);
1366
1367 *nam = in_sockaddr(port, &addr);
1368 return 0;
1369}
1370
1371void
1372in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, int errno,
1373 struct inpcb *(*notify)(struct inpcb *, int))
1374{
1375 struct inpcb *inp, *inp_temp;
1376
1377 INP_INFO_WLOCK(pcbinfo);
1378 LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, inp_temp) {
1379 INP_WLOCK(inp);
1380#ifdef INET6
1381 if ((inp->inp_vflag & INP_IPV4) == 0) {
1382 INP_WUNLOCK(inp);
1383 continue;
1384 }
1385#endif
1386 if (inp->inp_faddr.s_addr != faddr.s_addr ||
1387 inp->inp_socket == NULL) {
1388 INP_WUNLOCK(inp);
1389 continue;
1390 }
1391 if ((*notify)(inp, errno))
1392 INP_WUNLOCK(inp);
1393 }
1394 INP_INFO_WUNLOCK(pcbinfo);
1395}
1396
1397void
1398in_pcbpurgeif0(struct inpcbinfo *pcbinfo, struct ifnet *ifp)
1399{
1400 struct inpcb *inp;
1401 struct ip_moptions *imo;
1402 int i, gap;
1403
1404 INP_INFO_RLOCK(pcbinfo);
1405 LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) {
1406 INP_WLOCK(inp);
1407 imo = inp->inp_moptions;
1408 if ((inp->inp_vflag & INP_IPV4) &&
1409 imo != NULL) {
1410 /*
1411 * Unselect the outgoing interface if it is being
1412 * detached.
1413 */
1414 if (imo->imo_multicast_ifp == ifp)
1415 imo->imo_multicast_ifp = NULL;
1416
1417 /*
1418 * Drop multicast group membership if we joined
1419 * through the interface being detached.
1420 */
1421 for (i = 0, gap = 0; i < imo->imo_num_memberships;
1422 i++) {
1423 if (imo->imo_membership[i]->inm_ifp == ifp) {
1424 in_delmulti(imo->imo_membership[i]);
1425 gap++;
1426 } else if (gap != 0)
1427 imo->imo_membership[i - gap] =
1428 imo->imo_membership[i];
1429 }
1430 imo->imo_num_memberships -= gap;
1431 }
1432 INP_WUNLOCK(inp);
1433 }
1434 INP_INFO_RUNLOCK(pcbinfo);
1435}
1436
1437/*
1438 * Lookup a PCB based on the local address and port. Caller must hold the
1439 * hash lock. No inpcb locks or references are acquired.
1440 */
1441#define INP_LOOKUP_MAPPED_PCB_COST 3
1442struct inpcb *
1443in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr,
1444 u_short lport, int lookupflags, struct ucred *cred)
1445{
1446 struct inpcb *inp;
1447#ifdef INET6
1448 int matchwild = 3 + INP_LOOKUP_MAPPED_PCB_COST;
1449#else
1450 int matchwild = 3;
1451#endif
1452 int wildcard;
1453
1454 KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0,
1455 ("%s: invalid lookup flags %d", __func__, lookupflags));
1456
1457 INP_HASH_LOCK_ASSERT(pcbinfo);
1458
1459 if ((lookupflags & INPLOOKUP_WILDCARD) == 0) {
1460 struct inpcbhead *head;
1461 /*
1462 * Look for an unconnected (wildcard foreign addr) PCB that
1463 * matches the local address and port we're looking for.
1464 */
1465 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
1466 0, pcbinfo->ipi_hashmask)];
1467 LIST_FOREACH(inp, head, inp_hash) {
1468#ifdef INET6
1469 /* XXX inp locking */
1470 if ((inp->inp_vflag & INP_IPV4) == 0)
1471 continue;
1472#endif
1473 if (inp->inp_faddr.s_addr == INADDR_ANY &&
1474 inp->inp_laddr.s_addr == laddr.s_addr &&
1475 inp->inp_lport == lport) {
1476 /*
1477 * Found?
1478 */
1479 if (cred == NULL ||
1480 prison_equal_ip4(cred->cr_prison,
1481 inp->inp_cred->cr_prison))
1482 return (inp);
1483 }
1484 }
1485 /*
1486 * Not found.
1487 */
1488 return (NULL);
1489 } else {
1490 struct inpcbporthead *porthash;
1491 struct inpcbport *phd;
1492 struct inpcb *match = NULL;
1493 /*
1494 * Best fit PCB lookup.
1495 *
1496 * First see if this local port is in use by looking on the
1497 * port hash list.
1498 */
1499 porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport,
1500 pcbinfo->ipi_porthashmask)];
1501 LIST_FOREACH(phd, porthash, phd_hash) {
1502 if (phd->phd_port == lport)
1503 break;
1504 }
1505 if (phd != NULL) {
1506 /*
1507 * Port is in use by one or more PCBs. Look for best
1508 * fit.
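 *
 * Each candidate PCB is assigned a "wildcard" cost for every respect
 * in which it is a less exact match for the requested local address;
 * the candidate with the lowest cost is returned, and a cost of zero
 * (a perfect match) ends the search immediately.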
1509 */
1510 LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
1511 wildcard = 0;
1512 if (cred != NULL &&
1513 !prison_equal_ip4(inp->inp_cred->cr_prison,
1514 cred->cr_prison))
1515 continue;
1516#ifdef INET6
1517 /* XXX inp locking */
1518 if ((inp->inp_vflag & INP_IPV4) == 0)
1519 continue;
1520 /*
1521 * We never select the PCB that has
1522 * INP_IPV6 flag and is bound to :: if
1523 * we have another PCB which is bound
1524 * to 0.0.0.0. If a PCB has the
1525 * INP_IPV6 flag, then we set its cost
1526 * higher than IPv4 only PCBs.
1527 *
1528 * Note that the case only happens
1529 * when a socket is bound to ::, under
1530 * the condition that the use of the
1531 * mapped address is allowed.
1532 */
1533 if ((inp->inp_vflag & INP_IPV6) != 0)
1534 wildcard += INP_LOOKUP_MAPPED_PCB_COST;
1535#endif
1536 if (inp->inp_faddr.s_addr != INADDR_ANY)
1537 wildcard++;
1538 if (inp->inp_laddr.s_addr != INADDR_ANY) {
1539 if (laddr.s_addr == INADDR_ANY)
1540 wildcard++;
1541 else if (inp->inp_laddr.s_addr != laddr.s_addr)
1542 continue;
1543 } else {
1544 if (laddr.s_addr != INADDR_ANY)
1545 wildcard++;
1546 }
1547 if (wildcard < matchwild) {
1548 match = inp;
1549 matchwild = wildcard;
1550 if (matchwild == 0)
1551 break;
1552 }
1553 }
1554 }
1555 return (match);
1556 }
1557}
1558#undef INP_LOOKUP_MAPPED_PCB_COST
1559
1560#ifdef PCBGROUP
1561/*
1562 * Lookup PCB in hash list, using pcbgroup tables.
1563 */
1564static struct inpcb *
1565in_pcblookup_group(struct inpcbinfo *pcbinfo, struct inpcbgroup *pcbgroup,
1566 struct in_addr faddr, u_int fport_arg, struct in_addr laddr,
1567 u_int lport_arg, int lookupflags, struct ifnet *ifp)
1568{
1569 struct inpcbhead *head;
1570 struct inpcb *inp, *tmpinp;
1571 u_short fport = fport_arg, lport = lport_arg;
1572
1573 /*
1574 * First look for an exact match.
1575 */
1576 tmpinp = NULL;
1577 INP_GROUP_LOCK(pcbgroup);
1578 head = &pcbgroup->ipg_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
1579 pcbgroup->ipg_hashmask)];
1580 LIST_FOREACH(inp, head, inp_pcbgrouphash) {
1581#ifdef INET6
1582 /* XXX inp locking */
1583 if ((inp->inp_vflag & INP_IPV4) == 0)
1584 continue;
1585#endif
1586 if (inp->inp_faddr.s_addr == faddr.s_addr &&
1587 inp->inp_laddr.s_addr == laddr.s_addr &&
1588 inp->inp_fport == fport &&
1589 inp->inp_lport == lport) {
1590 /*
1591 * XXX We should be able to directly return
1592 * the inp here, without any checks.
1593 * Well unless both bound with SO_REUSEPORT?
1594 */
1595 if (prison_flag(inp->inp_cred, PR_IP4))
1596 goto found;
1597 if (tmpinp == NULL)
1598 tmpinp = inp;
1599 }
1600 }
1601 if (tmpinp != NULL) {
1602 inp = tmpinp;
1603 goto found;
1604 }
1605
1606#ifdef RSS
1607 /*
1608 * For incoming connections, we may wish to do a wildcard
1609 * match for an RSS-local socket.
1610 */
1611 if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1612 struct inpcb *local_wild = NULL, *local_exact = NULL;
1613#ifdef INET6
1614 struct inpcb *local_wild_mapped = NULL;
1615#endif
1616 struct inpcb *jail_wild = NULL;
1617 struct inpcbhead *head;
1618 int injail;
1619
1620 /*
1621 * Order of socket selection - we always prefer jails.
1622 * 1. jailed, non-wild.
1623 * 2. jailed, wild.
1624 * 3. non-jailed, non-wild.
1625 * 4. non-jailed, wild.
1626 */
1627
1628 head = &pcbgroup->ipg_hashbase[INP_PCBHASH(INADDR_ANY,
1629 lport, 0, pcbgroup->ipg_hashmask)];
1630 LIST_FOREACH(inp, head, inp_pcbgrouphash) {
1631#ifdef INET6
1632 /* XXX inp locking */
1633 if ((inp->inp_vflag & INP_IPV4) == 0)
1634 continue;
1635#endif
1636 if (inp->inp_faddr.s_addr != INADDR_ANY ||
1637 inp->inp_lport != lport)
1638 continue;
1639
1640 /* XXX inp locking */
1641 if (ifp && ifp->if_type == IFT_FAITH &&
1642 (inp->inp_flags & INP_FAITH) == 0)
1643 continue;
1644
1645 injail = prison_flag(inp->inp_cred, PR_IP4);
1646 if (injail) {
1647 if (prison_check_ip4(inp->inp_cred,
1648 &laddr) != 0)
1649 continue;
1650 } else {
1651 if (local_exact != NULL)
1652 continue;
1653 }
1654
1655 if (inp->inp_laddr.s_addr == laddr.s_addr) {
1656 if (injail)
1657 goto found;
1658 else
1659 local_exact = inp;
1660 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1661#ifdef INET6
1662 /* XXX inp locking, NULL check */
1663 if (inp->inp_vflag & INP_IPV6PROTO)
1664 local_wild_mapped = inp;
1665 else
1666#endif
1667 if (injail)
1668 jail_wild = inp;
1669 else
1670 local_wild = inp;
1671 }
1672 } /* LIST_FOREACH */
1673
1674 inp = jail_wild;
1675 if (inp == NULL)
1676 inp = local_exact;
1677 if (inp == NULL)
1678 inp = local_wild;
1679#ifdef INET6
1680 if (inp == NULL)
1681 inp = local_wild_mapped;
1682#endif
1683 if (inp != NULL)
1684 goto found;
1685 }
1686#endif
1687
1688 /*
1689 * Then look for a wildcard match, if requested.
1690 */
1691 if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1692 struct inpcb *local_wild = NULL, *local_exact = NULL;
1693#ifdef INET6
1694 struct inpcb *local_wild_mapped = NULL;
1695#endif
1696 struct inpcb *jail_wild = NULL;
1697 struct inpcbhead *head;
1698 int injail;
1699
1700 /*
1701 * Order of socket selection - we always prefer jails.
1702 * 1. jailed, non-wild.
1703 * 2. jailed, wild.
1704 * 3. non-jailed, non-wild.
1705 * 4. non-jailed, wild.
1706 */
1707 head = &pcbinfo->ipi_wildbase[INP_PCBHASH(INADDR_ANY, lport,
1708 0, pcbinfo->ipi_wildmask)];
1709 LIST_FOREACH(inp, head, inp_pcbgroup_wild) {
1710#ifdef INET6
1711 /* XXX inp locking */
1712 if ((inp->inp_vflag & INP_IPV4) == 0)
1713 continue;
1714#endif
1715 if (inp->inp_faddr.s_addr != INADDR_ANY ||
1716 inp->inp_lport != lport)
1717 continue;
1718
1719 /* XXX inp locking */
1720 if (ifp && ifp->if_type == IFT_FAITH &&
1721 (inp->inp_flags & INP_FAITH) == 0)
1722 continue;
1723
1724 injail = prison_flag(inp->inp_cred, PR_IP4);
1725 if (injail) {
1726 if (prison_check_ip4(inp->inp_cred,
1727 &laddr) != 0)
1728 continue;
1729 } else {
1730 if (local_exact != NULL)
1731 continue;
1732 }
1733
1734 if (inp->inp_laddr.s_addr == laddr.s_addr) {
1735 if (injail)
1736 goto found;
1737 else
1738 local_exact = inp;
1739 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1740#ifdef INET6
1741 /* XXX inp locking, NULL check */
1742 if (inp->inp_vflag & INP_IPV6PROTO)
1743 local_wild_mapped = inp;
1744 else
1745#endif
1746 if (injail)
1747 jail_wild = inp;
1748 else
1749 local_wild = inp;
1750 }
1751 } /* LIST_FOREACH */
1752 inp = jail_wild;
1753 if (inp == NULL)
1754 inp = local_exact;
1755 if (inp == NULL)
1756 inp = local_wild;
1757#ifdef INET6
1758 if (inp == NULL)
1759 inp = local_wild_mapped;
1760#endif
1761 if (inp != NULL)
1762 goto found;
1763 } /* if (lookupflags & INPLOOKUP_WILDCARD) */
1764 INP_GROUP_UNLOCK(pcbgroup);
1765 return (NULL);
1766
1767found:
1768 in_pcbref(inp);
1769 INP_GROUP_UNLOCK(pcbgroup);
1770 if (lookupflags & INPLOOKUP_WLOCKPCB) {
1771 INP_WLOCK(inp);
1772 if (in_pcbrele_wlocked(inp))
1773 return (NULL);
1774 } else if (lookupflags & INPLOOKUP_RLOCKPCB) {
1775 INP_RLOCK(inp);
1776 if (in_pcbrele_rlocked(inp))
1777 return (NULL);
1778 } else
1779 panic("%s: locking bug", __func__);
1780 return (inp);
1781}
1782#endif /* PCBGROUP */
1783
1784/*
1785 * Lookup PCB in hash list, using pcbinfo tables. This variation assumes
1786 * that the caller has locked the hash list, and will not perform any further
1787 * locking or reference operations on either the hash list or the connection.
1788 */
1789static struct inpcb *
1790in_pcblookup_hash_locked(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1791 u_int fport_arg, struct in_addr laddr, u_int lport_arg, int lookupflags,
1792 struct ifnet *ifp)
1793{
1794 struct inpcbhead *head;
1795 struct inpcb *inp, *tmpinp;
1796 u_short fport = fport_arg, lport = lport_arg;
1797
1798 KASSERT((lookupflags & ~(INPLOOKUP_WILDCARD)) == 0,
1799 ("%s: invalid lookup flags %d", __func__, lookupflags));
1800
1801 INP_HASH_LOCK_ASSERT(pcbinfo);
1802
1803 /*
1804 * First look for an exact match.
1805 */
1806 tmpinp = NULL;
1807 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr.s_addr, lport, fport,
1808 pcbinfo->ipi_hashmask)];
1809 LIST_FOREACH(inp, head, inp_hash) {
1810#ifdef INET6
1811 /* XXX inp locking */
1812 if ((inp->inp_vflag & INP_IPV4) == 0)
1813 continue;
1814#endif
1815 if (inp->inp_faddr.s_addr == faddr.s_addr &&
1816 inp->inp_laddr.s_addr == laddr.s_addr &&
1817 inp->inp_fport == fport &&
1818 inp->inp_lport == lport) {
1819 /*
1820 * XXX We should be able to directly return
1821 * the inp here, without any checks.
1822 * Well unless both bound with SO_REUSEPORT?
1823 */
1824 if (prison_flag(inp->inp_cred, PR_IP4))
1825 return (inp);
1826 if (tmpinp == NULL)
1827 tmpinp = inp;
1828 }
1829 }
1830 if (tmpinp != NULL)
1831 return (tmpinp);
1832
1833 /*
1834 * Then look for a wildcard match, if requested.
1835 */
1836 if ((lookupflags & INPLOOKUP_WILDCARD) != 0) {
1837 struct inpcb *local_wild = NULL, *local_exact = NULL;
1838#ifdef INET6
1839 struct inpcb *local_wild_mapped = NULL;
1840#endif
1841 struct inpcb *jail_wild = NULL;
1842 int injail;
1843
1844 /*
1845 * Order of socket selection - we always prefer jails.
1846 * 1. jailed, non-wild.
1847 * 2. jailed, wild.
1848 * 3. non-jailed, non-wild.
1849 * 4. non-jailed, wild.
1850 */
1851
1852 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport,
1853 0, pcbinfo->ipi_hashmask)];
1854 LIST_FOREACH(inp, head, inp_hash) {
1855#ifdef INET6
1856 /* XXX inp locking */
1857 if ((inp->inp_vflag & INP_IPV4) == 0)
1858 continue;
1859#endif
1860 if (inp->inp_faddr.s_addr != INADDR_ANY ||
1861 inp->inp_lport != lport)
1862 continue;
1863
1864 /* XXX inp locking */
1865 if (ifp && ifp->if_type == IFT_FAITH &&
1866 (inp->inp_flags & INP_FAITH) == 0)
1867 continue;
1868
1869 injail = prison_flag(inp->inp_cred, PR_IP4);
1870 if (injail) {
1871 if (prison_check_ip4(inp->inp_cred,
1872 &laddr) != 0)
1873 continue;
1874 } else {
1875 if (local_exact != NULL)
1876 continue;
1877 }
1878
1879 if (inp->inp_laddr.s_addr == laddr.s_addr) {
1880 if (injail)
1881 return (inp);
1882 else
1883 local_exact = inp;
1884 } else if (inp->inp_laddr.s_addr == INADDR_ANY) {
1885#ifdef INET6
1886 /* XXX inp locking, NULL check */
1887 if (inp->inp_vflag & INP_IPV6PROTO)
1888 local_wild_mapped = inp;
1889 else
1890#endif
1891 if (injail)
1892 jail_wild = inp;
1893 else
1894 local_wild = inp;
1895 }
1896 } /* LIST_FOREACH */
1897 if (jail_wild != NULL)
1898 return (jail_wild);
1899 if (local_exact != NULL)
1900 return (local_exact);
1901 if (local_wild != NULL)
1902 return (local_wild);
1903#ifdef INET6
1904 if (local_wild_mapped != NULL)
1905 return (local_wild_mapped);
1906#endif
1907 } /* if ((lookupflags & INPLOOKUP_WILDCARD) != 0) */
1908
1909 return (NULL);
1910}
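/*
 * To make the selection order above concrete: suppose three sockets are
 * bound to the looked-up lport: a non-jailed socket bound exactly to laddr,
 * a jailed wildcard socket whose prison owns laddr, and a non-jailed
 * wildcard socket.  The loop records them in local_exact, jail_wild and
 * local_wild respectively, and the jailed wildcard wins; the non-jailed
 * exact match is used only when no jailed socket matches at all, and the
 * non-jailed wildcard only as a last resort.
 */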
1911
1912/*
1913 * Lookup PCB in hash list, using pcbinfo tables. This variation locks the
1914 * hash list lock, and will return the inpcb locked (i.e., requires
1915 * INPLOOKUP_LOCKPCB).
1916 */
1917static struct inpcb *
1918in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1919 u_int fport, struct in_addr laddr, u_int lport, int lookupflags,
1920 struct ifnet *ifp)
1921{
1922 struct inpcb *inp;
1923
1924 INP_HASH_RLOCK(pcbinfo);
1925 inp = in_pcblookup_hash_locked(pcbinfo, faddr, fport, laddr, lport,
1926 (lookupflags & ~(INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)), ifp);
1927 if (inp != NULL) {
1928 in_pcbref(inp);
1929 INP_HASH_RUNLOCK(pcbinfo);
1930 if (lookupflags & INPLOOKUP_WLOCKPCB) {
1931 INP_WLOCK(inp);
1932 if (in_pcbrele_wlocked(inp))
1933 return (NULL);
1934 } else if (lookupflags & INPLOOKUP_RLOCKPCB) {
1935 INP_RLOCK(inp);
1936 if (in_pcbrele_rlocked(inp))
1937 return (NULL);
1938 } else
1939 panic("%s: locking bug", __func__);
1940 } else
1941 INP_HASH_RUNLOCK(pcbinfo);
1942 return (inp);
1943}
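/*
 * Note the reference/lock handoff in in_pcblookup_hash(): the inpcb is
 * pinned with in_pcbref() while the hash lock is still held, the hash lock
 * is dropped, and only then is the per-inpcb lock acquired.  If
 * in_pcbrele_wlocked() or in_pcbrele_rlocked() reports that our reference
 * was the last one, meaning the connection was torn down in the window
 * between the two locks, NULL is returned rather than a stale inpcb.
 */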
1944
1945/*
1946 * Public inpcb lookup routines, accepting a 4-tuple, and optionally, an mbuf
1947 * from which a pre-calculated hash value may be extracted.
1948 *
1949 * Possibly more of this logic should be in in_pcbgroup.c.
1950 */
1951struct inpcb *
1952in_pcblookup(struct inpcbinfo *pcbinfo, struct in_addr faddr, u_int fport,
1953 struct in_addr laddr, u_int lport, int lookupflags, struct ifnet *ifp)
1954{
1955#if defined(PCBGROUP) && !defined(RSS)
1956 struct inpcbgroup *pcbgroup;
1957#endif
1958
1959 KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0,
1960 ("%s: invalid lookup flags %d", __func__, lookupflags));
1961 KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0,
1962 ("%s: LOCKPCB not set", __func__));
1963
1964 /*
1965 * When not using RSS, use connection groups in preference to the
1966 * reservation table when looking up 4-tuples. When using RSS, just
1967 * use the reservation table, due to the cost of the Toeplitz hash
1968 * in software.
1969 *
1970 * XXXRW: This policy belongs in the pcbgroup code, as in principle
1971 * we could be doing RSS with a non-Toeplitz hash that is affordable
1972 * in software.
1973 */
1974#if defined(PCBGROUP) && !defined(RSS)
1975 if (in_pcbgroup_enabled(pcbinfo)) {
1976 pcbgroup = in_pcbgroup_bytuple(pcbinfo, laddr, lport, faddr,
1977 fport);
1978 return (in_pcblookup_group(pcbinfo, pcbgroup, faddr, fport,
1979 laddr, lport, lookupflags, ifp));
1980 }
1981#endif
1982 return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport,
1983 lookupflags, ifp));
1984}
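/*
 * A minimal sketch of a caller on the receive path (illustrative only; the
 * names ip, th and rcvif below are placeholders, not taken from any
 * particular protocol):
 *
 *	struct inpcb *inp;
 *
 *	inp = in_pcblookup(&V_tcbinfo, ip->ip_src, th->th_sport,
 *	    ip->ip_dst, th->th_dport,
 *	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, rcvif);
 *	if (inp != NULL) {
 *		... deliver to the connection ...
 *		INP_RUNLOCK(inp);
 *	}
 *
 * One of INPLOOKUP_RLOCKPCB or INPLOOKUP_WLOCKPCB must be passed, and the
 * matching unlock is the caller's responsibility.
 */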
1985
1986struct inpcb *
1987in_pcblookup_mbuf(struct inpcbinfo *pcbinfo, struct in_addr faddr,
1988 u_int fport, struct in_addr laddr, u_int lport, int lookupflags,
1989 struct ifnet *ifp, struct mbuf *m)
1990{
1991#ifdef PCBGROUP
1992 struct inpcbgroup *pcbgroup;
1993#endif
1994
1995 KASSERT((lookupflags & ~INPLOOKUP_MASK) == 0,
1996 ("%s: invalid lookup flags %d", __func__, lookupflags));
1997 KASSERT((lookupflags & (INPLOOKUP_RLOCKPCB | INPLOOKUP_WLOCKPCB)) != 0,
1998 ("%s: LOCKPCB not set", __func__));
1999
2000#ifdef PCBGROUP
2001 /*
2002 * If we can use a hardware-generated hash to look up the connection
2003 * group, use that connection group to find the inpcb. Otherwise
2004 * fall back on a software hash -- or the reservation table if we're
2005 * using RSS.
2006 *
2007 * XXXRW: As above, that policy belongs in the pcbgroup code.
2008 */
2009 if (in_pcbgroup_enabled(pcbinfo) &&
2010 !(M_HASHTYPE_TEST(m, M_HASHTYPE_NONE))) {
2011 pcbgroup = in_pcbgroup_byhash(pcbinfo, M_HASHTYPE_GET(m),
2012 m->m_pkthdr.flowid);
2013 if (pcbgroup != NULL)
2014 return (in_pcblookup_group(pcbinfo, pcbgroup, faddr,
2015 fport, laddr, lport, lookupflags, ifp));
2016#ifndef RSS
2017 pcbgroup = in_pcbgroup_bytuple(pcbinfo, laddr, lport, faddr,
2018 fport);
2019 return (in_pcblookup_group(pcbinfo, pcbgroup, faddr, fport,
2020 laddr, lport, lookupflags, ifp));
2021#endif
2022 }
2023#endif
2024 return (in_pcblookup_hash(pcbinfo, faddr, fport, laddr, lport,
2025 lookupflags, ifp));
2026}
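/*
 * Callers on the receive path that still hold the incoming mbuf should
 * prefer in_pcblookup_mbuf() over in_pcblookup(), so that a hardware
 * supplied flowid in m->m_pkthdr.flowid can steer the lookup to the right
 * connection group without recomputing a hash in software.
 */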
2027#endif /* INET */
2028
2029/*
2030 * Insert PCB onto various hash lists.
2031 */
2032static int
2033in_pcbinshash_internal(struct inpcb *inp, int do_pcbgroup_update)
2034{
2035 struct inpcbhead *pcbhash;
2036 struct inpcbporthead *pcbporthash;
2037 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2038 struct inpcbport *phd;
2039 u_int32_t hashkey_faddr;
2040
2041 INP_WLOCK_ASSERT(inp);
2042 INP_HASH_WLOCK_ASSERT(pcbinfo);
2043
2044 KASSERT((inp->inp_flags & INP_INHASHLIST) == 0,
2045 ("in_pcbinshash: INP_INHASHLIST"));
2046
2047#ifdef INET6
2048 if (inp->inp_vflag & INP_IPV6)
2049 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2049 hashkey_faddr = INP6_PCBHASHKEY(&inp->in6p_faddr);
2050 else
2051#endif
2052 hashkey_faddr = inp->inp_faddr.s_addr;
2053
2054 pcbhash = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr,
2055 inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)];
2056
2057 pcbporthash = &pcbinfo->ipi_porthashbase[
2058 INP_PCBPORTHASH(inp->inp_lport, pcbinfo->ipi_porthashmask)];
2059
2060 /*
2061 * Go through port list and look for a head for this lport.
2062 */
2063 LIST_FOREACH(phd, pcbporthash, phd_hash) {
2064 if (phd->phd_port == inp->inp_lport)
2065 break;
2066 }
2067 /*
2068 * If none exists, malloc one and tack it on.
2069 */
2070 if (phd == NULL) {
2071 phd = malloc(sizeof(struct inpcbport), M_PCB, M_NOWAIT);
2072 if (phd == NULL) {
2073 return (ENOBUFS); /* XXX */
2074 }
2075 phd->phd_port = inp->inp_lport;
2076 LIST_INIT(&phd->phd_pcblist);
2077 LIST_INSERT_HEAD(pcbporthash, phd, phd_hash);
2078 }
2079 inp->inp_phd = phd;
2080 LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist);
2081 LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
2082 inp->inp_flags |= INP_INHASHLIST;
2083#ifdef PCBGROUP
2084 if (do_pcbgroup_update)
2085 in_pcbgroup_update(inp);
2086#endif
2087 return (0);
2088}
2089
2090/*
2091 * For now, there are two public interfaces to insert an inpcb into the hash
2092 * lists -- one that does update pcbgroups, and one that doesn't. The latter
2093 * is used only in the TCP syncache, where in_pcbinshash is called before the
2094 * full 4-tuple is set for the inpcb, and we don't want to install in the
2095 * pcbgroup until later.
2096 *
2097 * XXXRW: This seems like a misfeature. in_pcbinshash should always update
2098 * connection groups, and partially initialised inpcbs should not be exposed
2099 * to either reservation hash tables or pcbgroups.
2100 */
2101int
2102in_pcbinshash(struct inpcb *inp)
2103{
2104
2105 return (in_pcbinshash_internal(inp, 1));
2106}
2107
2108int
2109in_pcbinshash_nopcbgroup(struct inpcb *inp)
2110{
2111
2112 return (in_pcbinshash_internal(inp, 0));
2113}
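/*
 * Both public interfaces require the inpcb write lock and the pcbinfo hash
 * write lock to be held by the caller, as asserted at the top of
 * in_pcbinshash_internal().
 */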
2114
2115/*
2116 * Move PCB to the proper hash bucket when { faddr, fport } have been
2117 * changed. NOTE: This does not handle the case of the lport changing (the
2118 * hashed port list would have to be updated as well), so the lport must
2119 * not change after in_pcbinshash() has been called.
2120 */
2121void
2122in_pcbrehash_mbuf(struct inpcb *inp, struct mbuf *m)
2123{
2124 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2125 struct inpcbhead *head;
2126 u_int32_t hashkey_faddr;
2127
2128 INP_WLOCK_ASSERT(inp);
2129 INP_HASH_WLOCK_ASSERT(pcbinfo);
2130
2131 KASSERT(inp->inp_flags & INP_INHASHLIST,
2132 ("in_pcbrehash: !INP_INHASHLIST"));
2133
2134#ifdef INET6
2135 if (inp->inp_vflag & INP_IPV6)
2136 hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */;
2136 hashkey_faddr = INP6_PCBHASHKEY(&inp->in6p_faddr);
2137 else
2138#endif
2139 hashkey_faddr = inp->inp_faddr.s_addr;
2140
2141 head = &pcbinfo->ipi_hashbase[INP_PCBHASH(hashkey_faddr,
2142 inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask)];
2143
2144 LIST_REMOVE(inp, inp_hash);
2145 LIST_INSERT_HEAD(head, inp, inp_hash);
2146
2147#ifdef PCBGROUP
2148 if (m != NULL)
2149 in_pcbgroup_update_mbuf(inp, m);
2150 else
2151 in_pcbgroup_update(inp);
2152#endif
2153}
2154
2155void
2156in_pcbrehash(struct inpcb *inp)
2157{
2158
2159 in_pcbrehash_mbuf(inp, NULL);
2160}
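/*
 * A minimal sketch of the intended use (illustrative only):
 *
 *	INP_WLOCK(inp);
 *	INP_HASH_WLOCK(pcbinfo);
 *	inp->inp_faddr = faddr;		(new foreign address)
 *	inp->inp_fport = fport;		(new foreign port)
 *	in_pcbrehash(inp);		(move to the new hash bucket)
 *	INP_HASH_WUNLOCK(pcbinfo);
 *	INP_WUNLOCK(inp);
 *
 * The local port must already be final; the port hash list is not revisited
 * here.
 */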
2161
2162/*
2163 * Remove PCB from various lists.
2164 */
2165static void
2166in_pcbremlists(struct inpcb *inp)
2167{
2168 struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
2169
2170 INP_INFO_WLOCK_ASSERT(pcbinfo);
2171 INP_WLOCK_ASSERT(inp);
2172
2173 inp->inp_gencnt = ++pcbinfo->ipi_gencnt;
2174 if (inp->inp_flags & INP_INHASHLIST) {
2175 struct inpcbport *phd = inp->inp_phd;
2176
2177 INP_HASH_WLOCK(pcbinfo);
2178 LIST_REMOVE(inp, inp_hash);
2179 LIST_REMOVE(inp, inp_portlist);
2180 if (LIST_FIRST(&phd->phd_pcblist) == NULL) {
2181 LIST_REMOVE(phd, phd_hash);
2182 free(phd, M_PCB);
2183 }
2184 INP_HASH_WUNLOCK(pcbinfo);
2185 inp->inp_flags &= ~INP_INHASHLIST;
2186 }
2187 LIST_REMOVE(inp, inp_list);
2188 pcbinfo->ipi_count--;
2189#ifdef PCBGROUP
2190 in_pcbgroup_remove(inp);
2191#endif
2192}
2193
2194/*
2195 * A set label operation has occurred at the socket layer, propagate the
2196 * label change into the in_pcb for the socket.
2197 */
2198void
2199in_pcbsosetlabel(struct socket *so)
2200{
2201#ifdef MAC
2202 struct inpcb *inp;
2203
2204 inp = sotoinpcb(so);
2205 KASSERT(inp != NULL, ("in_pcbsosetlabel: so->so_pcb == NULL"));
2206
2207 INP_WLOCK(inp);
2208 SOCK_LOCK(so);
2209 mac_inpcb_sosetlabel(so, inp);
2210 SOCK_UNLOCK(so);
2211 INP_WUNLOCK(inp);
2212#endif
2213}
2214
2215/*
2216 * ipport_tick runs once per second, determining if random port allocation
2217 * should be continued. If more than ipport_randomcps ports have been
2218 * allocated in the last second, then we return to sequential port
2219 * allocation. We return to random allocation only once we drop below
2220 * ipport_randomcps for at least ipport_randomtime seconds.
2221 */
2222static void
2223ipport_tick(void *xtp)
2224{
2225 VNET_ITERATOR_DECL(vnet_iter);
2226
2227 VNET_LIST_RLOCK_NOSLEEP();
2228 VNET_FOREACH(vnet_iter) {
2229 CURVNET_SET(vnet_iter); /* XXX appease INVARIANTS here */
2230 if (V_ipport_tcpallocs <=
2231 V_ipport_tcplastcount + V_ipport_randomcps) {
2232 if (V_ipport_stoprandom > 0)
2233 V_ipport_stoprandom--;
2234 } else
2235 V_ipport_stoprandom = V_ipport_randomtime;
2236 V_ipport_tcplastcount = V_ipport_tcpallocs;
2237 CURVNET_RESTORE();
2238 }
2239 VNET_LIST_RUNLOCK_NOSLEEP();
2240 callout_reset(&ipport_tick_callout, hz, ipport_tick, NULL);
2241}
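/*
 * For example, with ipport_randomcps = 10 and ipport_randomtime = 45 (the
 * defaults set earlier in this file): a burst of 200 ephemeral port
 * allocations within one second sets ipport_stoprandom to 45 and allocation
 * becomes sequential; random allocation resumes only after 45 consecutive
 * seconds in which at most 10 ports were allocated.
 */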
2242
2243static void
2244ip_fini(void *xtp)
2245{
2246
2247 callout_stop(&ipport_tick_callout);
2248}
2249
2250/*
2251 * The ipport_callout should start running at about the time we attach the
2252 * inet or inet6 domains.
2253 */
2254static void
2255ipport_tick_init(const void *unused __unused)
2256{
2257
2258 /* Start ipport_tick. */
2259 callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
2260 callout_reset(&ipport_tick_callout, 1, ipport_tick, NULL);
2261 EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
2262 SHUTDOWN_PRI_DEFAULT);
2263}
2264SYSINIT(ipport_tick_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_MIDDLE,
2265 ipport_tick_init, NULL);
2266
2267void
2268inp_wlock(struct inpcb *inp)
2269{
2270
2271 INP_WLOCK(inp);
2272}
2273
2274void
2275inp_wunlock(struct inpcb *inp)
2276{
2277
2278 INP_WUNLOCK(inp);
2279}
2280
2281void
2282inp_rlock(struct inpcb *inp)
2283{
2284
2285 INP_RLOCK(inp);
2286}
2287
2288void
2289inp_runlock(struct inpcb *inp)
2290{
2291
2292 INP_RUNLOCK(inp);
2293}
2294
2295#ifdef INVARIANTS
2296void
2297inp_lock_assert(struct inpcb *inp)
2298{
2299
2300 INP_WLOCK_ASSERT(inp);
2301}
2302
2303void
2304inp_unlock_assert(struct inpcb *inp)
2305{
2306
2307 INP_UNLOCK_ASSERT(inp);
2308}
2309#endif
2310
2311void
2312inp_apply_all(void (*func)(struct inpcb *, void *), void *arg)
2313{
2314 struct inpcb *inp;
2315
2316 INP_INFO_RLOCK(&V_tcbinfo);
2317 LIST_FOREACH(inp, V_tcbinfo.ipi_listhead, inp_list) {
2318 INP_WLOCK(inp);
2319 func(inp, arg);
2320 INP_WUNLOCK(inp);
2321 }
2322 INP_INFO_RUNLOCK(&V_tcbinfo);
2323}
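/*
 * A minimal sketch of a caller (illustrative only; count_dropped is a
 * made-up name, not an existing function):
 *
 *	static void
 *	count_dropped(struct inpcb *inp, void *arg)
 *	{
 *		int *np = arg;
 *
 *		if (inp->inp_flags & INP_DROPPED)
 *			(*np)++;
 *	}
 *
 *	int n = 0;
 *	inp_apply_all(count_dropped, &n);
 *
 * The callback runs with the inpcb write lock held, so it must neither
 * sleep nor take that lock again.
 */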
2324
2325struct socket *
2326inp_inpcbtosocket(struct inpcb *inp)
2327{
2328
2329 INP_WLOCK_ASSERT(inp);
2330 return (inp->inp_socket);
2331}
2332
2333struct tcpcb *
2334inp_inpcbtotcpcb(struct inpcb *inp)
2335{
2336
2337 INP_WLOCK_ASSERT(inp);
2338 return ((struct tcpcb *)inp->inp_ppcb);
2339}
2340
2341int
2342inp_ip_tos_get(const struct inpcb *inp)
2343{
2344
2345 return (inp->inp_ip_tos);
2346}
2347
2348void
2349inp_ip_tos_set(struct inpcb *inp, int val)
2350{
2351
2352 inp->inp_ip_tos = val;
2353}
2354
2355void
2356inp_4tuple_get(struct inpcb *inp, uint32_t *laddr, uint16_t *lp,
2357 uint32_t *faddr, uint16_t *fp)
2358{
2359
2360 INP_LOCK_ASSERT(inp);
2361 *laddr = inp->inp_laddr.s_addr;
2362 *faddr = inp->inp_faddr.s_addr;
2363 *lp = inp->inp_lport;
2364 *fp = inp->inp_fport;
2365}
2366
2367struct inpcb *
2368so_sotoinpcb(struct socket *so)
2369{
2370
2371 return (sotoinpcb(so));
2372}
2373
2374struct tcpcb *
2375so_sototcpcb(struct socket *so)
2376{
2377
2378 return (sototcpcb(so));
2379}
2380
2381#ifdef DDB
2382static void
2383db_print_indent(int indent)
2384{
2385 int i;
2386
2387 for (i = 0; i < indent; i++)
2388 db_printf(" ");
2389}
2390
2391static void
2392db_print_inconninfo(struct in_conninfo *inc, const char *name, int indent)
2393{
2394 char faddr_str[48], laddr_str[48];
2395
2396 db_print_indent(indent);
2397 db_printf("%s at %p\n", name, inc);
2398
2399 indent += 2;
2400
2401#ifdef INET6
2402 if (inc->inc_flags & INC_ISIPV6) {
2403 /* IPv6. */
2404 ip6_sprintf(laddr_str, &inc->inc6_laddr);
2405 ip6_sprintf(faddr_str, &inc->inc6_faddr);
2406 } else
2407#endif
2408 {
2409 /* IPv4. */
2410 inet_ntoa_r(inc->inc_laddr, laddr_str);
2411 inet_ntoa_r(inc->inc_faddr, faddr_str);
2412 }
2413 db_print_indent(indent);
2414 db_printf("inc_laddr %s inc_lport %u\n", laddr_str,
2415 ntohs(inc->inc_lport));
2416 db_print_indent(indent);
2417 db_printf("inc_faddr %s inc_fport %u\n", faddr_str,
2418 ntohs(inc->inc_fport));
2419}
2420
2421static void
2422db_print_inpflags(int inp_flags)
2423{
2424 int comma;
2425
2426 comma = 0;
2427 if (inp_flags & INP_RECVOPTS) {
2428 db_printf("%sINP_RECVOPTS", comma ? ", " : "");
2429 comma = 1;
2430 }
2431 if (inp_flags & INP_RECVRETOPTS) {
2432 db_printf("%sINP_RECVRETOPTS", comma ? ", " : "");
2433 comma = 1;
2434 }
2435 if (inp_flags & INP_RECVDSTADDR) {
2436 db_printf("%sINP_RECVDSTADDR", comma ? ", " : "");
2437 comma = 1;
2438 }
2439 if (inp_flags & INP_HDRINCL) {
2440 db_printf("%sINP_HDRINCL", comma ? ", " : "");
2441 comma = 1;
2442 }
2443 if (inp_flags & INP_HIGHPORT) {
2444 db_printf("%sINP_HIGHPORT", comma ? ", " : "");
2445 comma = 1;
2446 }
2447 if (inp_flags & INP_LOWPORT) {
2448 db_printf("%sINP_LOWPORT", comma ? ", " : "");
2449 comma = 1;
2450 }
2451 if (inp_flags & INP_ANONPORT) {
2452 db_printf("%sINP_ANONPORT", comma ? ", " : "");
2453 comma = 1;
2454 }
2455 if (inp_flags & INP_RECVIF) {
2456 db_printf("%sINP_RECVIF", comma ? ", " : "");
2457 comma = 1;
2458 }
2459 if (inp_flags & INP_MTUDISC) {
2460 db_printf("%sINP_MTUDISC", comma ? ", " : "");
2461 comma = 1;
2462 }
2463 if (inp_flags & INP_FAITH) {
2464 db_printf("%sINP_FAITH", comma ? ", " : "");
2465 comma = 1;
2466 }
2467 if (inp_flags & INP_RECVTTL) {
2468 db_printf("%sINP_RECVTTL", comma ? ", " : "");
2469 comma = 1;
2470 }
2471 if (inp_flags & INP_DONTFRAG) {
2472 db_printf("%sINP_DONTFRAG", comma ? ", " : "");
2473 comma = 1;
2474 }
2475 if (inp_flags & INP_RECVTOS) {
2476 db_printf("%sINP_RECVTOS", comma ? ", " : "");
2477 comma = 1;
2478 }
2479 if (inp_flags & IN6P_IPV6_V6ONLY) {
2480 db_printf("%sIN6P_IPV6_V6ONLY", comma ? ", " : "");
2481 comma = 1;
2482 }
2483 if (inp_flags & IN6P_PKTINFO) {
2484 db_printf("%sIN6P_PKTINFO", comma ? ", " : "");
2485 comma = 1;
2486 }
2487 if (inp_flags & IN6P_HOPLIMIT) {
2488 db_printf("%sIN6P_HOPLIMIT", comma ? ", " : "");
2489 comma = 1;
2490 }
2491 if (inp_flags & IN6P_HOPOPTS) {
2492 db_printf("%sIN6P_HOPOPTS", comma ? ", " : "");
2493 comma = 1;
2494 }
2495 if (inp_flags & IN6P_DSTOPTS) {
2496 db_printf("%sIN6P_DSTOPTS", comma ? ", " : "");
2497 comma = 1;
2498 }
2499 if (inp_flags & IN6P_RTHDR) {
2500 db_printf("%sIN6P_RTHDR", comma ? ", " : "");
2501 comma = 1;
2502 }
2503 if (inp_flags & IN6P_RTHDRDSTOPTS) {
2504 db_printf("%sIN6P_RTHDRDSTOPTS", comma ? ", " : "");
2505 comma = 1;
2506 }
2507 if (inp_flags & IN6P_TCLASS) {
2508 db_printf("%sIN6P_TCLASS", comma ? ", " : "");
2509 comma = 1;
2510 }
2511 if (inp_flags & IN6P_AUTOFLOWLABEL) {
2512 db_printf("%sIN6P_AUTOFLOWLABEL", comma ? ", " : "");
2513 comma = 1;
2514 }
2515 if (inp_flags & INP_TIMEWAIT) {
2516 db_printf("%sINP_TIMEWAIT", comma ? ", " : "");
2517 comma = 1;
2518 }
2519 if (inp_flags & INP_ONESBCAST) {
2520 db_printf("%sINP_ONESBCAST", comma ? ", " : "");
2521 comma = 1;
2522 }
2523 if (inp_flags & INP_DROPPED) {
2524 db_printf("%sINP_DROPPED", comma ? ", " : "");
2525 comma = 1;
2526 }
2527 if (inp_flags & INP_SOCKREF) {
2528 db_printf("%sINP_SOCKREF", comma ? ", " : "");
2529 comma = 1;
2530 }
2531 if (inp_flags & IN6P_RFC2292) {
2532 db_printf("%sIN6P_RFC2292", comma ? ", " : "");
2533 comma = 1;
2534 }
2535 if (inp_flags & IN6P_MTU) {
2536 db_printf("IN6P_MTU%s", comma ? ", " : "");
2537 comma = 1;
2538 }
2539}
2540
2541static void
2542db_print_inpvflag(u_char inp_vflag)
2543{
2544 int comma;
2545
2546 comma = 0;
2547 if (inp_vflag & INP_IPV4) {
2548 db_printf("%sINP_IPV4", comma ? ", " : "");
2549 comma = 1;
2550 }
2551 if (inp_vflag & INP_IPV6) {
2552 db_printf("%sINP_IPV6", comma ? ", " : "");
2553 comma = 1;
2554 }
2555 if (inp_vflag & INP_IPV6PROTO) {
2556 db_printf("%sINP_IPV6PROTO", comma ? ", " : "");
2557 comma = 1;
2558 }
2559}
2560
2561static void
2562db_print_inpcb(struct inpcb *inp, const char *name, int indent)
2563{
2564
2565 db_print_indent(indent);
2566 db_printf("%s at %p\n", name, inp);
2567
2568 indent += 2;
2569
2570 db_print_indent(indent);
2571 db_printf("inp_flow: 0x%x\n", inp->inp_flow);
2572
2573 db_print_inconninfo(&inp->inp_inc, "inp_conninfo", indent);
2574
2575 db_print_indent(indent);
2576 db_printf("inp_ppcb: %p inp_pcbinfo: %p inp_socket: %p\n",
2577 inp->inp_ppcb, inp->inp_pcbinfo, inp->inp_socket);
2578
2579 db_print_indent(indent);
2580 db_printf("inp_label: %p inp_flags: 0x%x (",
2581 inp->inp_label, inp->inp_flags);
2582 db_print_inpflags(inp->inp_flags);
2583 db_printf(")\n");
2584
2585 db_print_indent(indent);
2586 db_printf("inp_sp: %p inp_vflag: 0x%x (", inp->inp_sp,
2587 inp->inp_vflag);
2588 db_print_inpvflag(inp->inp_vflag);
2589 db_printf(")\n");
2590
2591 db_print_indent(indent);
2592 db_printf("inp_ip_ttl: %d inp_ip_p: %d inp_ip_minttl: %d\n",
2593 inp->inp_ip_ttl, inp->inp_ip_p, inp->inp_ip_minttl);
2594
2595 db_print_indent(indent);
2596#ifdef INET6
2597 if (inp->inp_vflag & INP_IPV6) {
2598 db_printf("in6p_options: %p in6p_outputopts: %p "
2599 "in6p_moptions: %p\n", inp->in6p_options,
2600 inp->in6p_outputopts, inp->in6p_moptions);
2601 db_printf("in6p_icmp6filt: %p in6p_cksum %d "
2602 "in6p_hops %u\n", inp->in6p_icmp6filt, inp->in6p_cksum,
2603 inp->in6p_hops);
2604 } else
2605#endif
2606 {
2607 db_printf("inp_ip_tos: %d inp_ip_options: %p "
2608 "inp_ip_moptions: %p\n", inp->inp_ip_tos,
2609 inp->inp_options, inp->inp_moptions);
2610 }
2611
2612 db_print_indent(indent);
2613 db_printf("inp_phd: %p inp_gencnt: %ju\n", inp->inp_phd,
2614 (uintmax_t)inp->inp_gencnt);
2615}
2616
2617DB_SHOW_COMMAND(inpcb, db_show_inpcb)
2618{
2619 struct inpcb *inp;
2620
2621 if (!have_addr) {
2622 db_printf("usage: show inpcb <addr>\n");
2623 return;
2624 }
2625 inp = (struct inpcb *)addr;
2626
2627 db_print_inpcb(inp, "inpcb", 0);
2628}
2629#endif /* DDB */