/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/ip_input.c 280971 2015-04-01 22:26:39Z glebius $");

#include "opt_bootp.h"
#include "opt_ipfw.h"
#include "opt_ipstealth.h"
#include "opt_ipsec.h"
#include "opt_route.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/rss_config.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_options.h>
#include <machine/in_cksum.h>
#include <netinet/ip_carp.h>
#ifdef IPSEC
#include <netinet/ip_ipsec.h>
#endif /* IPSEC */
#include <netinet/in_rss.h>

#include <sys/socketvar.h>

#include <security/mac/mac_framework.h>

#ifdef CTASSERT
CTASSERT(sizeof(struct ip) == 20);
#endif

struct	rwlock in_ifaddr_lock;
RW_SYSINIT(in_ifaddr_lock, &in_ifaddr_lock, "in_ifaddr_lock");
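
/*
 * The rwlock above protects the V_in_ifaddrhead list and the
 * V_in_ifaddrhashtbl hash defined further down in this file.
 */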

VNET_DEFINE(int, rsvp_on);

VNET_DEFINE(int, ipforwarding);
SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ipforwarding), 0,
    "Enable IP forwarding between interfaces");

static VNET_DEFINE(int, ipsendredirects) = 1;	/* XXX */
#define	V_ipsendredirects	VNET(ipsendredirects)
SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ipsendredirects), 0,
    "Enable sending IP redirects");

/*
 * XXX - Setting ip_checkinterface mostly implements the receive side of
 * the Strong ES model described in RFC 1122, but since the routing table
 * and transmit implementation do not implement the Strong ES model,
 * setting this to 1 results in an odd hybrid.
 *
 * XXX - ip_checkinterface currently must be disabled if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be disabled if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
static VNET_DEFINE(int, ip_checkinterface);
#define	V_ip_checkinterface	VNET(ip_checkinterface)
SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_checkinterface), 0,
    "Verify packet arrives on correct interface");

VNET_DEFINE(struct pfil_head, inet_pfil_hook);	/* Packet filter hooks */

static struct netisr_handler ip_nh = {
	.nh_name = "ip",
	.nh_handler = ip_input,
	.nh_proto = NETISR_IP,
#ifdef	RSS
	.nh_m2cpuid = rss_soft_m2cpuid,
	.nh_policy = NETISR_POLICY_CPU,
	.nh_dispatch = NETISR_DISPATCH_HYBRID,
#else
	.nh_policy = NETISR_POLICY_FLOW,
#endif
};

#ifdef	RSS
/*
 * Directly dispatched frames are currently assumed
 * to have a flowid already calculated.
 *
 * Something should likely assert that a frame
 * actually carries valid flow details.
 */
static struct netisr_handler ip_direct_nh = {
	.nh_name = "ip_direct",
	.nh_handler = ip_direct_input,
	.nh_proto = NETISR_IP_DIRECT,
	.nh_m2cpuid = rss_m2cpuid,
	.nh_policy = NETISR_POLICY_CPU,
	.nh_dispatch = NETISR_DISPATCH_HYBRID,
};
#endif

extern	struct domain inetdomain;
extern	struct protosw inetsw[];
u_char	ip_protox[IPPROTO_MAX];
VNET_DEFINE(struct in_ifaddrhead, in_ifaddrhead);  /* first inet address */
VNET_DEFINE(struct in_ifaddrhashhead *, in_ifaddrhashtbl); /* inet addr hash table  */
VNET_DEFINE(u_long, in_ifaddrhmask);		/* mask for hash table */

static VNET_DEFINE(uma_zone_t, ipq_zone);
static VNET_DEFINE(TAILQ_HEAD(ipqhead, ipq), ipq[IPREASS_NHASH]);
static struct mtx ipqlock;

#define	V_ipq_zone		VNET(ipq_zone)
#define	V_ipq			VNET(ipq)

#define	IPQ_LOCK()	mtx_lock(&ipqlock)
#define	IPQ_UNLOCK()	mtx_unlock(&ipqlock)
#define	IPQ_LOCK_INIT()	mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
#define	IPQ_LOCK_ASSERT()	mtx_assert(&ipqlock, MA_OWNED)
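
/*
 * Note that ipqlock is a single global mutex, not a per-vnet one: it
 * serializes reassembly for every hash chain in every vnet.  ip_slowtimo()
 * and ip_drain() rely on this when they iterate over all vnets under a
 * single IPQ_LOCK().
 */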

static void	maxnipq_update(void);
static void	ipq_zone_change(void *);
static void	ip_drain_locked(void);

static VNET_DEFINE(int, maxnipq);  /* Administrative limit on # reass queues. */
static VNET_DEFINE(int, nipq);			/* Total # of reass queues */
#define	V_maxnipq		VNET(maxnipq)
#define	V_nipq			VNET(nipq)
SYSCTL_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(nipq), 0,
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");

#ifdef IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
    &ip_mtu, 0, "Default MTU");
#endif

#ifdef IPSTEALTH
VNET_DEFINE(int, ipstealth);
SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ipstealth), 0,
    "IP stealth mode, no TTL decrementation on forwarding");
#endif

static void	ip_freef(struct ipqhead *, struct ipq *);

/*
 * IP statistics are stored in the "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct ipstat, ipstat);
VNET_PCPUSTAT_SYSINIT(ipstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_ip, IPCTL_STATS, stats, struct ipstat, ipstat,
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ipstat);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating ipstat.  The argument is an index
 * into ipstat treated as an array.
 */
void
kmod_ipstat_inc(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], 1);
}

void
kmod_ipstat_dec(int statnum)
{

	counter_u64_add(VNET(ipstat)[statnum], -1);
}

static int
sysctl_netinet_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_nh, qlimit));
}
SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_queue_maxlen, "I",
    "Maximum size of the IP input queue");

static int
sysctl_netinet_intr_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_nh);
	return (0);
}

SYSCTL_PROC(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_queue_drops, "I",
    "Number of packets dropped from the IP input queue");

#ifdef	RSS
static int
sysctl_netinet_intr_direct_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
	int error, qlimit;

	netisr_getqlimit(&ip_direct_nh, &qlimit);
	error = sysctl_handle_int(oidp, &qlimit, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qlimit < 1)
		return (EINVAL);
	return (netisr_setqlimit(&ip_direct_nh, qlimit));
}
/* OID_AUTO: reusing IPCTL_INTRQMAXLEN would collide with the OID above. */
SYSCTL_PROC(_net_inet_ip, OID_AUTO, intr_direct_queue_maxlen,
    CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet_intr_direct_queue_maxlen, "I",
    "Maximum size of the IP direct input queue");

static int
sysctl_netinet_intr_direct_queue_drops(SYSCTL_HANDLER_ARGS)
{
	u_int64_t qdrops_long;
	int error, qdrops;

	netisr_getqdrops(&ip_direct_nh, &qdrops_long);
	qdrops = qdrops_long;
	error = sysctl_handle_int(oidp, &qdrops, 0, req);
	if (error || !req->newptr)
		return (error);
	if (qdrops != 0)
		return (EINVAL);
	netisr_clearqdrops(&ip_direct_nh);
	return (0);
}

/* OID_AUTO: reusing IPCTL_INTRQDROPS would collide with the OID above. */
SYSCTL_PROC(_net_inet_ip, OID_AUTO, intr_direct_queue_drops,
    CTLTYPE_INT|CTLFLAG_RD, 0, 0, sysctl_netinet_intr_direct_queue_drops, "I",
    "Number of packets dropped from the IP direct input queue");
#endif	/* RSS */

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	struct protosw *pr;
	int i;

	TAILQ_INIT(&V_in_ifaddrhead);
	V_in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &V_in_ifaddrhmask);

	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++)
		TAILQ_INIT(&V_ipq[i]);
	V_maxnipq = nmbclusters / 32;
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	maxnipq_update();

	/* Initialize packet filter hooks. */
	V_inet_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_inet_pfil_hook.ph_af = AF_INET;
	if ((i = pfil_head_register(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil hook, "
			"error %d\n", __func__, i);

	/* Skip initialization of globals for non-default instances. */
	if (!IS_DEFAULT_VNET(curvnet))
		return;

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip_init: PF_INET not found");

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[].
	 */
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX)
				ip_protox[pr->pr_protocol] = pr - inetsw;
		}

	EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
		NULL, EVENTHANDLER_PRI_ANY);

	/* Initialize various other remaining things. */
	IPQ_LOCK_INIT();
	netisr_register(&ip_nh);
#ifdef	RSS
	netisr_register(&ip_direct_nh);
#endif
}

#ifdef VIMAGE
void
ip_destroy(void)
{
	int i;

	if ((i = pfil_head_unregister(&V_inet_pfil_hook)) != 0)
		printf("%s: WARNING: unable to unregister pfil hook, "
		    "error %d\n", __func__, i);

	/* Cleanup in_ifaddr hash table; should be empty. */
	hashdestroy(V_in_ifaddrhashtbl, M_IFADDR, V_in_ifaddrhmask);

	IPQ_LOCK();
	ip_drain_locked();
	IPQ_UNLOCK();

	uma_zdestroy(V_ipq_zone);
}
#endif

#ifdef	RSS
/*
 * IP direct input routine.
 *
 * This is called when reinjecting completed fragments where
 * all of the previous checking and book-keeping has been done.
 */
void
ip_direct_input(struct mbuf *m)
{
	struct ip *ip;
	int hlen;

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	IPSTAT_INC(ips_delivered);
	(*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p);
	return;
}
#endif

/*
 * IP input routine.  Checksum and byte swap header.  If fragmented
 * try to reassemble.  Process options.  Pass to next level.
 */
void
ip_input(struct mbuf *m)
{
	struct ip *ip = NULL;
	struct in_ifaddr *ia = NULL;
	struct ifaddr *ifa;
	struct ifnet *ifp;
	int    checkif, hlen = 0;
	uint16_t sum, ip_len;
	int dchg = 0;				/* dest changed after fw */
	struct in_addr odst;			/* original dst address */

	M_ASSERTPKTHDR(m);

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		/* Set up some basics that will be used later. */
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		ip_len = ntohs(ip->ip_len);
		goto ours;
	}

	IPSTAT_INC(ips_total);

	if (m->m_pkthdr.len < sizeof(struct ip))
		goto tooshort;

	if (m->m_len < sizeof (struct ip) &&
	    (m = m_pullup(m, sizeof (struct ip))) == NULL) {
		IPSTAT_INC(ips_toosmall);
		return;
	}
	ip = mtod(m, struct ip *);

	if (ip->ip_v != IPVERSION) {
		IPSTAT_INC(ips_badvers);
		goto bad;
	}

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			IPSTAT_INC(ips_badhlen);
			return;
		}
		ip = mtod(m, struct ip *);
	}

	IP_PROBE(receive, NULL, NULL, ip, m->m_pkthdr.rcvif, ip, NULL);

	/* 127/8 must not appear on wire - RFC1122 */
	ifp = m->m_pkthdr.rcvif;
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		if ((ifp->if_flags & IFF_LOOPBACK) == 0) {
			IPSTAT_INC(ips_badaddr);
			goto bad;
		}
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		IPSTAT_INC(ips_badsum);
		goto bad;
	}

#ifdef ALTQ
	if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
		/* packet is dropped by traffic conditioner */
		return;
#endif

	ip_len = ntohs(ip->ip_len);
	if (ip_len < hlen) {
		IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip_len) {
tooshort:
		IPSTAT_INC(ips_tooshort);
		goto bad;
	}
	if (m->m_pkthdr.len > ip_len) {
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip_len;
			m->m_pkthdr.len = ip_len;
		} else
			m_adj(m, ip_len - m->m_pkthdr.len);
	}

#ifdef IPSEC
	/*
	 * Bypass packet filtering for packets previously handled by IPsec.
	 */
	if (ip_ipsec_filtertunnel(m))
		goto passin;
#endif /* IPSEC */

	/*
	 * Run through list of hooks for input packets.
	 *
	 * NB: Beware of the destination address changing (e.g.
	 *     by NAT rewriting).  When this happens, tell
	 *     ip_forward to do the right thing.
	 */

	/* Jump over all PFIL processing if hooks are not active. */
	if (!PFIL_HOOKED(&V_inet_pfil_hook))
		goto passin;

	odst = ip->ip_dst;
	if (pfil_run_hooks(&V_inet_pfil_hook, &m, ifp, PFIL_IN, NULL) != 0)
		return;
	if (m == NULL)			/* consumed by filter */
		return;

	ip = mtod(m, struct ip *);
	dchg = (odst.s_addr != ip->ip_dst.s_addr);
	ifp = m->m_pkthdr.rcvif;

	if (m->m_flags & M_FASTFWD_OURS) {
		m->m_flags &= ~M_FASTFWD_OURS;
		goto ours;
	}
	if (m->m_flags & M_IP_NEXTHOP) {
		dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
		if (dchg != 0) {
			/*
			 * Directly ship the packet on.  This allows
			 * forwarding packets originally destined to us
			 * to some other directly connected host.
			 */
			ip_forward(m, 1);
			return;
		}
	}
passin:

	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
		return;

	/*
	 * Greedy RSVP: snatch any PATH packet of the RSVP protocol, no
	 * matter whether it is destined for another node or is a
	 * multicast packet.  RSVP wants it and prevents it from being
	 * forwarded anywhere else.  Also check that the rsvp daemon is
	 * running before grabbing the packet.
	 */
	if (V_rsvp_on && ip->ip_p == IPPROTO_RSVP)
		goto ours;

	/*
	 * Check our list of addresses, to see if the packet is for us.
	 * If we don't have any addresses, assume any unicast packet
	 * we receive might be for us (and let the upper layers deal
	 * with it).
	 */
	if (TAILQ_EMPTY(&V_in_ifaddrhead) &&
	    (m->m_flags & (M_MCAST|M_BCAST)) == 0)
		goto ours;

	/*
	 * Enable a consistency check between the destination address
	 * and the arrival interface for a unicast packet (the RFC 1122
	 * strong ES model) if IP forwarding is disabled and the packet
	 * is not locally generated and the packet is not subject to
	 * 'ipfw fwd'.
	 *
	 * XXX - Checking also should be disabled if the destination
	 * address is ipnat'ed to a different interface.
	 *
	 * XXX - Checking is incompatible with IP aliases added
	 * to the loopback interface instead of the interface where
	 * the packets are received.
	 *
	 * XXX - This is the case for carp vhost IPs as well so we
	 * insert a workaround. If the packet got here, we already
	 * checked with carp_iamatch() and carp_forus().
	 */
	checkif = V_ip_checkinterface && (V_ipforwarding == 0) &&
	    ifp != NULL && ((ifp->if_flags & IFF_LOOPBACK) == 0) &&
	    ifp->if_carp == NULL && (dchg == 0);

	/*
	 * Check for exact addresses in the hash bucket.
	 */
	/* IN_IFADDR_RLOCK(); */
	LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		/*
		 * If the address matches, verify that the packet
		 * arrived via the correct interface if checking is
		 * enabled.
		 */
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
		    (!checkif || ia->ia_ifp == ifp)) {
			counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
			counter_u64_add(ia->ia_ifa.ifa_ibytes,
			    m->m_pkthdr.len);
			/* IN_IFADDR_RUNLOCK(); */
			goto ours;
		}
	}
	/* IN_IFADDR_RUNLOCK(); */

	/*
	 * Check for broadcast addresses.
	 *
	 * Only accept broadcast packets that arrive via the matching
	 * interface.  Reception of forwarded directed broadcasts would
	 * be handled via ip_forward() and ether_output() with the loopback
	 * into the stack for SIMPLEX interfaces handled by ether_output().
	 */
	if (ifp != NULL && ifp->if_flags & IFF_BROADCAST) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET)
				continue;
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
			    ip->ip_dst.s_addr) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#ifdef BOOTP_COMPAT
			if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
				counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
				counter_u64_add(ia->ia_ifa.ifa_ibytes,
				    m->m_pkthdr.len);
				IF_ADDR_RUNLOCK(ifp);
				goto ours;
			}
#endif
		}
		IF_ADDR_RUNLOCK(ifp);
		ia = NULL;
	}
	/* RFC 3927 2.7: Do not forward datagrams for 169.254.0.0/16. */
	if (IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		if (V_ip_mrouter) {
			/*
			 * If we are acting as a multicast router, all
			 * incoming multicast packets are passed to the
			 * kernel-level multicast forwarding function.
			 * The packet is returned (relatively) intact; if
			 * ip_mforward() returns a non-zero value, the packet
			 * must be discarded, else it may be accepted below.
			 */
			if (ip_mforward && ip_mforward(ip, ifp, m, 0) != 0) {
				IPSTAT_INC(ips_cantforward);
				m_freem(m);
				return;
			}

			/*
			 * The process-level routing daemon needs to receive
			 * all multicast IGMP packets, whether or not this
			 * host belongs to their destination groups.
			 */
			if (ip->ip_p == IPPROTO_IGMP)
				goto ours;
			IPSTAT_INC(ips_forward);
		}
		/*
		 * Assume the packet is for us, to avoid prematurely taking
		 * a lock on the in_multi hash. Protocols must perform
		 * their own filtering and update statistics accordingly.
		 */
		goto ours;
	}
	if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
		goto ours;
	if (ip->ip_dst.s_addr == INADDR_ANY)
		goto ours;

	/*
	 * Not for us; forward if possible and desirable.
	 */
	if (V_ipforwarding == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
	} else {
		ip_forward(m, dchg);
	}
	return;

ours:
#ifdef IPSTEALTH
	/*
	 * IPSTEALTH: Process non-routing options only
	 * if the packet is destined for us.
	 */
	if (V_ipstealth && hlen > sizeof (struct ip) && ip_dooptions(m, 1))
		return;
#endif /* IPSTEALTH */

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
		/* XXXGL: shouldn't we save & set m_flags? */
		m = ip_reass(m);
		if (m == NULL)
			return;
		ip = mtod(m, struct ip *);
		/* Get the header length of the reassembled packet */
		hlen = ip->ip_hl << 2;
	}

#ifdef IPSEC
	/*
	 * enforce IPsec policy checking if we are seeing last header.
	 * note that we do not visit this with protocols with pcb layer
	 * code - like udp/tcp/raw ip.
	 */
	if (ip_ipsec_input(m, ip->ip_p) != 0)
		goto bad;
#endif /* IPSEC */

	/*
	 * Switch out to protocol's input routine.
	 */
	IPSTAT_INC(ips_delivered);

	(*inetsw[ip_protox[ip->ip_p]].pr_input)(&m, &hlen, ip->ip_p);
	return;
bad:
	m_freem(m);
}

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
maxnipq_update(void)
{

	/*
	 * -1 for unlimited allocation.
	 */
	if (V_maxnipq < 0)
		uma_zone_set_max(V_ipq_zone, 0);
	/*
	 * Positive number for specific bound.
	 */
	if (V_maxnipq > 0)
		uma_zone_set_max(V_ipq_zone, V_maxnipq);
	/*
	 * Zero specifies no further fragment queue allocation -- set the
	 * bound very low, but rely on implementation elsewhere to actually
	 * prevent allocation and reclaim current queues.
	 */
	if (V_maxnipq == 0)
		uma_zone_set_max(V_ipq_zone, 1);
}

static void
ipq_zone_change(void *tag)
{

	if (V_maxnipq > 0 && V_maxnipq < (nmbclusters / 32)) {
		V_maxnipq = nmbclusters / 32;
		maxnipq_update();
	}
}

static int
sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = V_maxnipq;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		return (error);

	/*
	 * XXXRW: Might be a good idea to sanity check the argument and place
	 * an extreme upper bound.
	 */
	if (i < -1)
		return (EINVAL);
	V_maxnipq = i;
	maxnipq_update();
	return (0);
}

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
    NULL, 0, sysctl_maxnipq, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");

#define	M_IP_FRAG	M_PROTO9
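/*
 * The define above borrows M_PROTO9 as a reassembly-private mbuf flag
 * meaning "this fragment arrived with IP_MF set"; it is only meaningful
 * while the mbuf sits on a reassembly queue.
 */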

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one
 * in between, the function will return NULL and store the mbuf
 * in the fragment chain.  If the argument is the last fragment,
 * the packet will be reassembled and the pointer to the new
 * mbuf returned for further processing.  Only m_tags attached
 * to the first packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp = NULL;
	struct ipqhead *head;
	int i, hlen, next;
	u_int8_t ecn, ecn0;
	u_short hash;
#ifdef	RSS
	uint32_t rss_hash, rss_type;
#endif

	/* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
	if (V_maxnipq == 0 || V_maxfragsperpacket == 0) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	head = &V_ipq[hash];
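	/*
	 * The hash only narrows the search: per RFC 791 fragments belong to
	 * the same datagram only if the (id, src, dst, protocol) tuple
	 * matches, which the lookup below checks in full.
	 */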
	IPQ_LOCK();

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			goto found;

	fp = NULL;

	/*
	 * Attempt to trim the number of allocated fragment queues if it
	 * exceeds the administrative limit.
	 */
	if ((V_nipq > V_maxnipq) && (V_maxnipq > 0)) {
		/*
		 * drop something from the tail of the current queue
		 * before proceeding further
		 */
		struct ipq *q = TAILQ_LAST(head, ipqhead);
		if (q == NULL) {   /* gak */
			for (i = 0; i < IPREASS_NHASH; i++) {
				struct ipq *r = TAILQ_LAST(&V_ipq[i], ipqhead);
				if (r) {
					IPSTAT_ADD(ips_fragtimeout,
					    r->ipq_nfrags);
					ip_freef(&V_ipq[i], r);
					break;
				}
			}
		} else {
			IPSTAT_ADD(ips_fragtimeout, q->ipq_nfrags);
			ip_freef(head, q);
		}
	}

found:
	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			goto dropfrag;
		}
		m->m_flags |= M_IP_FRAG;
	} else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);
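	/*
	 * The wire format counts ip_off in 8-byte units; after the shift
	 * above it is kept in bytes (still network byte order) so that the
	 * overlap arithmetic below can work directly with byte offsets.
	 */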

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/* Previous ip_reass() started here. */
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;
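	/*
	 * In other words, following the RFC 3168 rules for reassembly: any
	 * CE fragment makes the reassembled datagram CE, while a mix of
	 * not-ECT and ECN-capable fragments has no defined result, so the
	 * datagram is dropped.
	 */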

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}
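	/*
	 * Worked example of the trim above: if the preceding fragment
	 * covers bytes [0, 24) and this one claims [16, 40), then
	 * i = 0 + 24 - 16 = 8, the first 8 bytes are m_adj()'ed off and
	 * this fragment is relabelled to cover [24, 40).
	 */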

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 *
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket) {
				IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
				ip_freef(head, fp);
			}
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
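	/*
	 * Falling out of the loop means the chain has no gaps and 'next'
	 * now holds the total payload length; the early 'goto done' above
	 * is taken when a hole remains and the datagram is incomplete.
	 */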
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket) {
			IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
			ip_freef(head, fp);
		}
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
		ip_freef(head, fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
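	/*
	 * For example, an accumulated csum_data of 0x2fffe folds first to
	 * 0xfffe + 0x2 = 0x10000 and then to 0x0000 + 0x1 = 0x1.
	 */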
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet;  dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_nipq--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK();

#ifdef	RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL)
		fp->ipq_nfrags--;
	m_freem(m);
done:
	IPQ_UNLOCK();
	return (NULL);

#undef GETIP
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
static void
ip_freef(struct ipqhead *fhp, struct ipq *fp)
{
	struct mbuf *q;

	IPQ_LOCK_ASSERT();

	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(fhp, fp, ipq_list);
	uma_zfree(V_ipq_zone, fp);
	V_nipq--;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
ip_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ipq *fp;
	int i;

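	/*
	 * This runs from the protocol slow timeout (nominally PR_SLOWHZ ==
	 * 2 times per second), so each ipq_ttl tick is roughly half a
	 * second and IPFRAGTTL bounds the lifetime of a reassembly queue.
	 */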
	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (i = 0; i < IPREASS_NHASH; i++) {
			for (fp = TAILQ_FIRST(&V_ipq[i]); fp;) {
				struct ipq *fpp;

				fpp = fp;
				fp = TAILQ_NEXT(fp, ipq_list);
				if (--fpp->ipq_ttl == 0) {
					IPSTAT_ADD(ips_fragtimeout,
					    fpp->ipq_nfrags);
					ip_freef(&V_ipq[i], fpp);
				}
			}
		}
		/*
		 * If we are over the maximum number of fragments
		 * (due to the limit being lowered), drain off
		 * enough to get down to the new limit.
		 */
		if (V_maxnipq >= 0 && V_nipq > V_maxnipq) {
			for (i = 0; i < IPREASS_NHASH; i++) {
				while (V_nipq > V_maxnipq &&
				    !TAILQ_EMPTY(&V_ipq[i])) {
					IPSTAT_ADD(ips_fragdropped,
					    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
					ip_freef(&V_ipq[i],
					    TAILQ_FIRST(&V_ipq[i]));
				}
			}
		}
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Drain off all datagram fragments.
 */
static void
ip_drain_locked(void)
{
	int     i;

	IPQ_LOCK_ASSERT();

	for (i = 0; i < IPREASS_NHASH; i++) {
		while (!TAILQ_EMPTY(&V_ipq[i])) {
			IPSTAT_ADD(ips_fragdropped,
			    TAILQ_FIRST(&V_ipq[i])->ipq_nfrags);
			ip_freef(&V_ipq[i], TAILQ_FIRST(&V_ipq[i]));
		}
	}
}

void
ip_drain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	IPQ_LOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		ip_drain_locked();
		CURVNET_RESTORE();
	}
	IPQ_UNLOCK();
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * The protocol to be inserted into ip_protox[] must be already registered
 * in inetsw[], either statically or through pf_proto_register().
 */
int
ipproto_register(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/*
	 * The protocol slot must not be occupied by another protocol
	 * already.  An index pointing to IPPROTO_RAW is unused.
	 */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] != pr - inetsw)	/* IPPROTO_RAW */
		return (EEXIST);

	/* Find the protocol position in inetsw[] and set the index. */
	for (pr = inetdomain.dom_protosw;
	     pr < inetdomain.dom_protoswNPROTOSW; pr++) {
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol == ipproto) {
			ip_protox[pr->pr_protocol] = pr - inetsw;
			return (0);
		}
	}
	return (EPROTONOSUPPORT);
}

int
ipproto_unregister(short ipproto)
{
	struct protosw *pr;

	/* Sanity checks. */
	if (ipproto <= 0 || ipproto >= IPPROTO_MAX)
		return (EPROTONOSUPPORT);

	/* Check if the protocol was indeed registered. */
	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		return (EPFNOSUPPORT);
	if (ip_protox[ipproto] == pr - inetsw)  /* IPPROTO_RAW */
		return (ENOENT);

	/* Reset the protocol slot to IPPROTO_RAW. */
	ip_protox[ipproto] = pr - inetsw;
	return (0);
}

/*
 * Given address of next destination (final or next hop), return (referenced)
 * internet address info of interface to be used to get there.
 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, u_int fibnum)
{
	struct route sro;
	struct sockaddr_in *sin;
	struct in_ifaddr *ia;

	bzero(&sro, sizeof(sro));
	sin = (struct sockaddr_in *)&sro.ro_dst;
	sin->sin_family = AF_INET;
	sin->sin_len = sizeof(*sin);
	sin->sin_addr = dst;
	in_rtalloc_ign(&sro, 0, fibnum);

	if (sro.ro_rt == NULL)
		return (NULL);

	ia = ifatoia(sro.ro_rt->rt_ifa);
	ifa_ref(&ia->ia_ifa);
	RTFREE(sro.ro_rt);
	return (ia);
}

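/*
 * Map PRC_* control-input codes (derived from incoming ICMP messages) to
 * the errnos reported to the transport protocols; 0 means the event is
 * not reported as an error.
 */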
u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		EHOSTUNREACH,	0,
	ENOPROTOOPT,	ECONNREFUSED
};

/*
 * Forward a packet.  If some error occurs return the sender
 * an icmp packet.  Note we can't always generate a meaningful
 * icmp message because icmp doesn't have a large enough repertoire
 * of codes and types.
 *
 * If not forwarding, just drop the packet.  This could be confusing
 * if ipforwarding was zero but some routing protocol was advancing
 * us as a gateway to somewhere.  However, we must let the routing
 * protocol deal with that.
 *
 * The srcrt parameter indicates whether the packet is being forwarded
 * via a source route.
 */
void
ip_forward(struct mbuf *m, int srcrt)
{
	struct ip *ip = mtod(m, struct ip *);
	struct in_ifaddr *ia;
	struct mbuf *mcopy;
	struct in_addr dest;
	struct route ro;
	int error, type = 0, code = 0, mtu = 0;

	if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#ifdef IPSEC
	if (ip_ipsec_fwd(m) != 0) {
		IPSTAT_INC(ips_cantforward);
		m_freem(m);
		return;
	}
#endif /* IPSEC */
#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		if (ip->ip_ttl <= IPTTLDEC) {
			icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
			    0, 0);
			return;
		}
#ifdef IPSTEALTH
	}
#endif

	ia = ip_rtaddr(ip->ip_dst, M_GETFIB(m));
#ifndef IPSEC
	/*
	 * 'ia' may be NULL if there is no route for this destination.
	 * In the case of IPsec, don't discard it just yet; pass it to
	 * ip_output in case an outgoing IPsec policy applies.
	 */
	if (!srcrt && ia == NULL) {
		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
		return;
	}
#endif

	/*
	 * Save the IP header and at most 8 bytes of the payload,
	 * in case we need to generate an ICMP message to the src.
	 *
	 * XXX this can be optimized a lot by saving the data in a local
	 * buffer on the stack (72 bytes at most), and only allocating the
	 * mbuf if really necessary. The vast majority of the packets
	 * are forwarded without having to send an ICMP back (either
	 * because unnecessary, or because rate limited), so we are
	 * really wasting a lot of work here.
	 *
	 * We don't use m_copy() because it might return a reference
	 * to a shared cluster. Both this function and ip_output()
	 * assume exclusive access to the IP header in `m', so any
	 * data in a cluster may change before we reach icmp_error().
	 */
	mcopy = m_gethdr(M_NOWAIT, m->m_type);
	if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_NOWAIT)) {
		/*
		 * It's probably ok if the pkthdr dup fails (because
		 * the deep copy of the tag chain failed), but for now
		 * be conservative and just discard the copy since
		 * code below may some day want the tags.
		 */
		m_free(mcopy);
		mcopy = NULL;
	}
	if (mcopy != NULL) {
		mcopy->m_len = min(ntohs(ip->ip_len), M_TRAILINGSPACE(mcopy));
		mcopy->m_pkthdr.len = mcopy->m_len;
		m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
	}

#ifdef IPSTEALTH
	if (!V_ipstealth) {
#endif
		ip->ip_ttl -= IPTTLDEC;
#ifdef IPSTEALTH
	}
#endif

	/*
	 * If forwarding packet using same interface that it came in on,
	 * perhaps should send a redirect to sender to shortcut a hop.
	 * Only send redirect if source is sending directly to us,
	 * and if packet was not source routed (or has any options).
	 * Also, don't send redirect if forwarding using a default route
	 * or a route modified by a redirect.
	 */
	dest.s_addr = 0;
	if (!srcrt && V_ipsendredirects &&
	    ia != NULL && ia->ia_ifp == m->m_pkthdr.rcvif) {
		struct sockaddr_in *sin;
		struct rtentry *rt;

		bzero(&ro, sizeof(ro));
		sin = (struct sockaddr_in *)&ro.ro_dst;
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = ip->ip_dst;
		in_rtalloc_ign(&ro, 0, M_GETFIB(m));

		rt = ro.ro_rt;

		if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
		    satosin(rt_key(rt))->sin_addr.s_addr != 0) {
#define	RTA(rt)	((struct in_ifaddr *)(rt->rt_ifa))
			u_long src = ntohl(ip->ip_src.s_addr);

			if (RTA(rt) &&
			    (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
				if (rt->rt_flags & RTF_GATEWAY)
					dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
				else
					dest.s_addr = ip->ip_dst.s_addr;
				/* Router requirements says to only send host redirects */
				type = ICMP_REDIRECT;
				code = ICMP_REDIRECT_HOST;
			}
		}
		if (rt)
			RTFREE(rt);
	}

	/*
	 * Try to cache the route MTU from ip_output so we can consider it for
	 * the ICMP_UNREACH_NEEDFRAG "Next-Hop MTU" field described in RFC1191.
	 */
	bzero(&ro, sizeof(ro));

	error = ip_output(m, NULL, &ro, IP_FORWARDING, NULL, NULL);

	if (error == EMSGSIZE && ro.ro_rt)
		mtu = ro.ro_rt->rt_mtu;
	RO_RTFREE(&ro);

	if (error)
		IPSTAT_INC(ips_cantforward);
	else {
		IPSTAT_INC(ips_forward);
		if (type)
			IPSTAT_INC(ips_redirectsent);
		else {
			if (mcopy)
				m_freem(mcopy);
			if (ia != NULL)
				ifa_free(&ia->ia_ifa);
			return;
		}
	}
	if (mcopy == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}

	switch (error) {

	case 0:				/* forwarded, but need redirect */
		/* type, code set above */
		break;

	case ENETUNREACH:
	case EHOSTUNREACH:
	case ENETDOWN:
	case EHOSTDOWN:
	default:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_HOST;
		break;

	case EMSGSIZE:
		type = ICMP_UNREACH;
		code = ICMP_UNREACH_NEEDFRAG;

#ifdef IPSEC
		/*
		 * If IPsec is configured for this path, override any
		 * MTU value possibly set by ip_output.
		 */
		mtu = ip_ipsec_mtu(mcopy, mtu);
#endif /* IPSEC */
		/*
		 * If the MTU was set before make sure we are below the
		 * interface MTU.
		 * If the MTU wasn't set before use the interface mtu or
		 * fall back to the next smaller mtu step compared to the
		 * current packet size.
		 */
		if (mtu != 0) {
			if (ia != NULL)
				mtu = min(mtu, ia->ia_ifp->if_mtu);
		} else {
			if (ia != NULL)
				mtu = ia->ia_ifp->if_mtu;
			else
				mtu = ip_next_mtu(ntohs(ip->ip_len), 0);
		}
		IPSTAT_INC(ips_cantfrag);
		break;

	case ENOBUFS:
	case EACCES:			/* ipfw denied packet */
		m_freem(mcopy);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return;
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	icmp_error(mcopy, type, code, dest.s_addr, mtu);
}

void
ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
    struct mbuf *m)
{

	if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
		struct bintime bt;

		bintime(&bt);
		if (inp->inp_socket->so_options & SO_BINTIME) {
			*mp = sbcreatecontrol((caddr_t)&bt, sizeof(bt),
			    SCM_BINTIME, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
		if (inp->inp_socket->so_options & SO_TIMESTAMP) {
			struct timeval tv;

			bintime2timeval(&bt, &tv);
			*mp = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
			    SCM_TIMESTAMP, SOL_SOCKET);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl,
		    sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t)opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t)ip_srcroute(m),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char	pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		if ((ifp = m->m_pkthdr.rcvif) &&
		    ifp->if_index && ifp->if_index <= V_if_index) {
			sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
			/*
			 * Change our mind and don't try to copy.
			 */
			if (sdp->sdl_family != AF_LINK ||
			    sdp->sdl_len > sizeof(sdlbuf)) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len =
			    offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		*mp = sbcreatecontrol((caddr_t)sdl2, sdl2->sdl_len,
		    IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	if (inp->inp_flags & INP_RECVTOS) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_tos,
		    sizeof(u_char), IP_RECVTOS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}

	if (inp->inp_flags2 & INP_RECVFLOWID) {
		uint32_t flowid, flow_type;

		flowid = m->m_pkthdr.flowid;
		flow_type = M_HASHTYPE_GET(m);

		/*
		 * XXX should handle the failure of one or the
		 * other - don't populate both?
		 */
		*mp = sbcreatecontrol((caddr_t) &flowid,
		    sizeof(uint32_t), IP_FLOWID, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
		*mp = sbcreatecontrol((caddr_t) &flow_type,
		    sizeof(uint32_t), IP_FLOWTYPE, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}

#ifdef	RSS
	if (inp->inp_flags2 & INP_RECVRSSBUCKETID) {
		uint32_t flowid, flow_type;
		uint32_t rss_bucketid;

		flowid = m->m_pkthdr.flowid;
		flow_type = M_HASHTYPE_GET(m);

		if (rss_hash2bucket(flowid, flow_type, &rss_bucketid) == 0) {
			*mp = sbcreatecontrol((caddr_t) &rss_bucketid,
			   sizeof(uint32_t), IP_RSSBUCKETID, IPPROTO_IP);
			if (*mp)
				mp = &(*mp)->m_next;
		}
	}
#endif
}

/*
 * XXXRW: Multicast routing code in ip_mroute.c is generally MPSAFE, but the
 * ip_rsvp and ip_rsvp_on variables need to be interlocked with rsvp_on
 * locking.  This code remains in ip_input.c as ip_mroute.c is optionally
 * compiled.
 */
static VNET_DEFINE(int, ip_rsvp_on);
VNET_DEFINE(struct socket *, ip_rsvpd);

#define	V_ip_rsvp_on		VNET(ip_rsvp_on)

int
ip_rsvp_init(struct socket *so)
{

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_RSVP)
		return EOPNOTSUPP;

	if (V_ip_rsvpd != NULL)
		return EADDRINUSE;

	V_ip_rsvpd = so;
	/*
	 * This may seem silly, but we need to be sure we don't over-increment
	 * the RSVP counter, in case something slips up.
	 */
	if (!V_ip_rsvp_on) {
		V_ip_rsvp_on = 1;
		V_rsvp_on++;
	}

	return 0;
}

int
ip_rsvp_done(void)
{

	V_ip_rsvpd = NULL;
	/*
	 * This may seem silly, but we need to be sure we don't over-decrement
	 * the RSVP counter, in case something slips up.
	 */
	if (V_ip_rsvp_on) {
		V_ip_rsvp_on = 0;
		V_rsvp_on--;
	}
	return 0;
}

int
rsvp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m;

	m = *mp;
	*mp = NULL;

	if (rsvp_input_p) { /* call the real one if loaded */
		*mp = m;
		rsvp_input_p(mp, offp, proto);
		return (IPPROTO_DONE);
	}

	/* Can still get packets with rsvp_on = 0 if there is a local member
	 * of the group to which the RSVP packet is addressed.  But in this
	 * case we want to throw the packet away.
	 */

	if (!V_rsvp_on) {
		m_freem(m);
		return (IPPROTO_DONE);
	}

	if (V_ip_rsvpd != NULL) {
		*mp = m;
		rip_input(mp, offp, proto);
		return (IPPROTO_DONE);
	}
	/* Drop the packet */
	m_freem(m);
	return (IPPROTO_DONE);
}