ip_input.c (155425) vs. ip_input.c (157927)

The only substantive change in this revision is fragment-reassembly tuning: ip_init() now registers a handler for the nmbclusters_change event, so the administrative limit on reassembly queues (maxnipq) is re-derived when the nmbclusters limit grows. See the added declaration at line 170, the registration at lines 260-261, and the new ipq_zone_change() function at lines 693-701.
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
30 * $FreeBSD: head/sys/netinet/ip_input.c 155425 2006-02-07 11:48:10Z oleg $
30 * $FreeBSD: head/sys/netinet/ip_input.c 157927 2006-04-21 09:25:40Z ps $
31 */
32
33#include "opt_bootp.h"
34#include "opt_ipfw.h"
35#include "opt_ipstealth.h"
36#include "opt_ipsec.h"
37#include "opt_mac.h"
38#include "opt_carp.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/callout.h>
43#include <sys/mac.h>
44#include <sys/mbuf.h>
45#include <sys/malloc.h>
46#include <sys/domain.h>
47#include <sys/protosw.h>
48#include <sys/socket.h>
49#include <sys/time.h>
50#include <sys/kernel.h>
51#include <sys/syslog.h>
52#include <sys/sysctl.h>
53
54#include <net/pfil.h>
55#include <net/if.h>
56#include <net/if_types.h>
57#include <net/if_var.h>
58#include <net/if_dl.h>
59#include <net/route.h>
60#include <net/netisr.h>
61
62#include <netinet/in.h>
63#include <netinet/in_systm.h>
64#include <netinet/in_var.h>
65#include <netinet/ip.h>
66#include <netinet/in_pcb.h>
67#include <netinet/ip_var.h>
68#include <netinet/ip_icmp.h>
69#include <netinet/ip_options.h>
70#include <machine/in_cksum.h>
71#ifdef DEV_CARP
72#include <netinet/ip_carp.h>
73#endif
74#if defined(IPSEC) || defined(FAST_IPSEC)
75#include <netinet/ip_ipsec.h>
76#endif /* IPSEC */
77
78#include <sys/socketvar.h>
79
80/* XXX: Temporary until ipfw_ether and ipfw_bridge are converted. */
81#include <netinet/ip_fw.h>
82#include <netinet/ip_dummynet.h>
83
84int rsvp_on = 0;
85
86int ipforwarding = 0;
87SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
88 &ipforwarding, 0, "Enable IP forwarding between interfaces");
89
90static int ipsendredirects = 1; /* XXX */
91SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
92 &ipsendredirects, 0, "Enable sending IP redirects");
93
94int ip_defttl = IPDEFTTL;
95SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
96 &ip_defttl, 0, "Maximum TTL on IP packets");
97
98static int ip_keepfaith = 0;
99SYSCTL_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
100 &ip_keepfaith, 0,
101 "Enable packet capture for FAITH IPv4->IPv6 translater daemon");
102
103static int ip_sendsourcequench = 0;
104SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench, CTLFLAG_RW,
105 &ip_sendsourcequench, 0,
106 "Enable the transmission of source quench packets");
107
108int ip_do_randomid = 0;
109SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
110 &ip_do_randomid, 0,
111 "Assign random ip_id values");
112
113/*
114 * XXX - Setting ip_checkinterface mostly implements the receive side of
115 * the Strong ES model described in RFC 1122, but since the routing table
116 * and transmit implementation do not implement the Strong ES model,
117 * setting this to 1 results in an odd hybrid.
118 *
119 * XXX - ip_checkinterface currently must be disabled if you use ipnat
120 * to translate the destination address to another local interface.
121 *
122 * XXX - ip_checkinterface must be disabled if you add IP aliases
123 * to the loopback interface instead of the interface where the
124 * packets for those addresses are received.
125 */
126static int ip_checkinterface = 0;
127SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
128 &ip_checkinterface, 0, "Verify packet arrives on correct interface");
129
130struct pfil_head inet_pfil_hook; /* Packet filter hooks */
131
132static struct ifqueue ipintrq;
133static int ipqmaxlen = IFQ_MAXLEN;
134
135extern struct domain inetdomain;
136extern struct protosw inetsw[];
137u_char ip_protox[IPPROTO_MAX];
138struct in_ifaddrhead in_ifaddrhead; /* first inet address */
139struct in_ifaddrhashhead *in_ifaddrhashtbl; /* inet addr hash table */
140u_long in_ifaddrhmask; /* mask for hash table */
141
142SYSCTL_INT(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, CTLFLAG_RW,
143 &ipintrq.ifq_maxlen, 0, "Maximum size of the IP input queue");
144SYSCTL_INT(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, CTLFLAG_RD,
145 &ipintrq.ifq_drops, 0, "Number of packets dropped from the IP input queue");
146
147struct ipstat ipstat;
148SYSCTL_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RW,
149 &ipstat, ipstat, "IP statistics (struct ipstat, netinet/ip_var.h)");
150
151/*
152 * IP datagram reassembly.
153 */
154#define IPREASS_NHASH_LOG2 6
155#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2)
156#define IPREASS_HMASK (IPREASS_NHASH - 1)
157#define IPREASS_HASH(x,y) \
158 (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
159
160static uma_zone_t ipq_zone;
161static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH];
162static struct mtx ipqlock;
163
164#define IPQ_LOCK() mtx_lock(&ipqlock)
165#define IPQ_UNLOCK() mtx_unlock(&ipqlock)
166#define IPQ_LOCK_INIT() mtx_init(&ipqlock, "ipqlock", NULL, MTX_DEF)
167#define IPQ_LOCK_ASSERT() mtx_assert(&ipqlock, MA_OWNED)
168
169static void maxnipq_update(void);
170static void ipq_zone_change(void *);
171
172static int maxnipq; /* Administrative limit on # reass queues. */
173static int nipq = 0; /* Total # of reass queues */
174SYSCTL_INT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD, &nipq, 0,
175 "Current number of IPv4 fragment reassembly queue entries");
176
177static int maxfragsperpacket;
178SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
179 &maxfragsperpacket, 0,
180 "Maximum number of IPv4 fragments allowed per packet");
181
182struct callout ipport_tick_callout;
183
184#ifdef IPCTL_DEFMTU
185SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
186 &ip_mtu, 0, "Default MTU");
187#endif
188
189#ifdef IPSTEALTH
190int ipstealth = 0;
191SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
192 &ipstealth, 0, "");
193#endif
194
195/*
196 * ipfw_ether and ipfw_bridge hooks.
197 * XXX: Temporary until those are converted to pfil_hooks as well.
198 */
199ip_fw_chk_t *ip_fw_chk_ptr = NULL;
200ip_dn_io_t *ip_dn_io_ptr = NULL;
201int fw_enable = 1;
202int fw_one_pass = 1;
203
204static void ip_freef(struct ipqhead *, struct ipq *);
205
206/*
207 * IP initialization: fill in IP protocol switch table.
208 * All protocols not implemented in kernel go to raw IP protocol handler.
209 */
210void
211ip_init()
212{
213 register struct protosw *pr;
214 register int i;
215
216 TAILQ_INIT(&in_ifaddrhead);
217 in_ifaddrhashtbl = hashinit(INADDR_NHASH, M_IFADDR, &in_ifaddrhmask);
218 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
219 if (pr == NULL)
220 panic("ip_init: PF_INET not found");
221
222 /* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
223 for (i = 0; i < IPPROTO_MAX; i++)
224 ip_protox[i] = pr - inetsw;
225 /*
226 * Cycle through IP protocols and put them into the appropriate place
227 * in ip_protox[].
228 */
229 for (pr = inetdomain.dom_protosw;
230 pr < inetdomain.dom_protoswNPROTOSW; pr++)
231 if (pr->pr_domain->dom_family == PF_INET &&
232 pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
233 /* Be careful to only index valid IP protocols. */
234 if (pr->pr_protocol < IPPROTO_MAX)
235 ip_protox[pr->pr_protocol] = pr - inetsw;
236 }
237
238 /* Initialize packet filter hooks. */
239 inet_pfil_hook.ph_type = PFIL_TYPE_AF;
240 inet_pfil_hook.ph_af = AF_INET;
241 if ((i = pfil_head_register(&inet_pfil_hook)) != 0)
242 printf("%s: WARNING: unable to register pfil hook, "
243 "error %d\n", __func__, i);
244
245 /* Initialize IP reassembly queue. */
246 IPQ_LOCK_INIT();
247 for (i = 0; i < IPREASS_NHASH; i++)
248 TAILQ_INIT(&ipq[i]);
249 maxnipq = nmbclusters / 32;
250 maxfragsperpacket = 16;
251 ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
252 NULL, UMA_ALIGN_PTR, 0);
253 maxnipq_update();
254
255 /* Start ipport_tick. */
256 callout_init(&ipport_tick_callout, CALLOUT_MPSAFE);
257 ipport_tick(NULL);
258 EVENTHANDLER_REGISTER(shutdown_pre_sync, ip_fini, NULL,
259 SHUTDOWN_PRI_DEFAULT);
260 EVENTHANDLER_REGISTER(nmbclusters_change, ipq_zone_change,
261 NULL, EVENTHANDLER_PRI_ANY);
262
263 /* Initialize various other remaining things. */
264 ip_id = time_second & 0xffff;
265 ipintrq.ifq_maxlen = ipqmaxlen;
266 mtx_init(&ipintrq.ifq_mtx, "ip_inq", NULL, MTX_DEF);
267 netisr_register(NETISR_IP, ip_input, &ipintrq, NETISR_MPSAFE);
268}
269
270void ip_fini(xtp)
271 void *xtp;
272{
273 callout_stop(&ipport_tick_callout);
274}
275
276/*
277 * Ip input routine. Checksum and byte swap header. If fragmented
278 * try to reassemble. Process options. Pass to next level.
279 */
280void
281ip_input(struct mbuf *m)
282{
283 struct ip *ip = NULL;
284 struct in_ifaddr *ia = NULL;
285 struct ifaddr *ifa;
286 int checkif, hlen = 0;
287 u_short sum;
288 int dchg = 0; /* dest changed after fw */
289 struct in_addr odst; /* original dst address */
290
291 M_ASSERTPKTHDR(m);
292
293 if (m->m_flags & M_FASTFWD_OURS) {
294 /*
295 * Firewall or NAT changed destination to local.
296 * We expect ip_len and ip_off to be in host byte order.
297 */
298 m->m_flags &= ~M_FASTFWD_OURS;
299 /* Set up some basics that will be used later. */
300 ip = mtod(m, struct ip *);
301 hlen = ip->ip_hl << 2;
302 goto ours;
303 }
304
305 ipstat.ips_total++;
306
307 if (m->m_pkthdr.len < sizeof(struct ip))
308 goto tooshort;
309
310 if (m->m_len < sizeof (struct ip) &&
311 (m = m_pullup(m, sizeof (struct ip))) == NULL) {
312 ipstat.ips_toosmall++;
313 return;
314 }
315 ip = mtod(m, struct ip *);
316
317 if (ip->ip_v != IPVERSION) {
318 ipstat.ips_badvers++;
319 goto bad;
320 }
321
322 hlen = ip->ip_hl << 2;
323 if (hlen < sizeof(struct ip)) { /* minimum header length */
324 ipstat.ips_badhlen++;
325 goto bad;
326 }
327 if (hlen > m->m_len) {
328 if ((m = m_pullup(m, hlen)) == NULL) {
329 ipstat.ips_badhlen++;
330 return;
331 }
332 ip = mtod(m, struct ip *);
333 }
334
335 /* 127/8 must not appear on wire - RFC1122 */
336 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
337 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
338 if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) {
339 ipstat.ips_badaddr++;
340 goto bad;
341 }
342 }
343
344 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
345 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
346 } else {
347 if (hlen == sizeof(struct ip)) {
348 sum = in_cksum_hdr(ip);
349 } else {
350 sum = in_cksum(m, hlen);
351 }
352 }
353 if (sum) {
354 ipstat.ips_badsum++;
355 goto bad;
356 }
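
An intact header, checksum field included, sums to zero in one's-complement arithmetic, which is why the bare `if (sum)` test above is the entire validity check. The kernel's in_cksum_hdr() is a machine-dependent optimization of that sum; a portable user-space sketch of the same idea:

#include <stddef.h>
#include <stdint.h>

/*
 * One's-complement sum over an IP header; a correct header (checksum
 * field included) yields 0. len must be even, e.g. 20 with no options.
 */
static uint16_t
ip_cksum(const void *hdr, size_t len)
{
	const uint16_t *w = hdr;
	uint32_t sum = 0;

	while (len > 1) {
		sum += *w++;
		len -= 2;
	}
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries... */
	sum = (sum & 0xffff) + (sum >> 16);	/* ...twice is always enough */
	return (~sum & 0xffff);
}
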
357
358#ifdef ALTQ
359 if (altq_input != NULL && (*altq_input)(m, AF_INET) == 0)
360 /* packet is dropped by traffic conditioner */
361 return;
362#endif
363
364 /*
365 * Convert fields to host representation.
366 */
367 ip->ip_len = ntohs(ip->ip_len);
368 if (ip->ip_len < hlen) {
369 ipstat.ips_badlen++;
370 goto bad;
371 }
372 ip->ip_off = ntohs(ip->ip_off);
373
374 /*
375 * Check that the amount of data in the buffers
 376 * is at least as much as the IP header would have us expect.
377 * Trim mbufs if longer than we expect.
378 * Drop packet if shorter than we expect.
379 */
380 if (m->m_pkthdr.len < ip->ip_len) {
381tooshort:
382 ipstat.ips_tooshort++;
383 goto bad;
384 }
385 if (m->m_pkthdr.len > ip->ip_len) {
386 if (m->m_len == m->m_pkthdr.len) {
387 m->m_len = ip->ip_len;
388 m->m_pkthdr.len = ip->ip_len;
389 } else
390 m_adj(m, ip->ip_len - m->m_pkthdr.len);
391 }
392#if defined(IPSEC) || defined(FAST_IPSEC)
393 /*
394 * Bypass packet filtering for packets from a tunnel (gif).
395 */
396 if (ip_ipsec_filtergif(m))
397 goto passin;
398#endif /* IPSEC */
399
400 /*
401 * Run through list of hooks for input packets.
402 *
403 * NB: Beware of the destination address changing (e.g.
404 * by NAT rewriting). When this happens, tell
405 * ip_forward to do the right thing.
406 */
407
408 /* Jump over all PFIL processing if hooks are not active. */
409 if (!PFIL_HOOKED(&inet_pfil_hook))
410 goto passin;
411
412 odst = ip->ip_dst;
413 if (pfil_run_hooks(&inet_pfil_hook, &m, m->m_pkthdr.rcvif,
414 PFIL_IN, NULL) != 0)
415 return;
416 if (m == NULL) /* consumed by filter */
417 return;
418
419 ip = mtod(m, struct ip *);
420 dchg = (odst.s_addr != ip->ip_dst.s_addr);
421
422#ifdef IPFIREWALL_FORWARD
423 if (m->m_flags & M_FASTFWD_OURS) {
424 m->m_flags &= ~M_FASTFWD_OURS;
425 goto ours;
426 }
427#ifndef IPFIREWALL_FORWARD_EXTENDED
428 dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL);
429#else
430 if ((dchg = (m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL)) != 0) {
431 /*
 432 * Directly ship the packet on. This allows forwarding packets
433 * that were destined for us to some other directly connected
434 * host.
435 */
436 ip_forward(m, dchg);
437 return;
438 }
439#endif /* IPFIREWALL_FORWARD_EXTENDED */
440#endif /* IPFIREWALL_FORWARD */
441
442passin:
443 /*
444 * Process options and, if not destined for us,
445 * ship it on. ip_dooptions returns 1 when an
446 * error was detected (causing an icmp message
447 * to be sent and the original packet to be freed).
448 */
449 if (hlen > sizeof (struct ip) && ip_dooptions(m, 0))
450 return;
451
452 /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no
453 * matter if it is destined to another node, or whether it is
454 * a multicast one, RSVP wants it! and prevents it from being forwarded
455 * anywhere else. Also checks if the rsvp daemon is running before
456 * grabbing the packet.
457 */
458 if (rsvp_on && ip->ip_p==IPPROTO_RSVP)
459 goto ours;
460
461 /*
462 * Check our list of addresses, to see if the packet is for us.
463 * If we don't have any addresses, assume any unicast packet
464 * we receive might be for us (and let the upper layers deal
465 * with it).
466 */
467 if (TAILQ_EMPTY(&in_ifaddrhead) &&
468 (m->m_flags & (M_MCAST|M_BCAST)) == 0)
469 goto ours;
470
471 /*
472 * Enable a consistency check between the destination address
473 * and the arrival interface for a unicast packet (the RFC 1122
474 * strong ES model) if IP forwarding is disabled and the packet
475 * is not locally generated and the packet is not subject to
476 * 'ipfw fwd'.
477 *
478 * XXX - Checking also should be disabled if the destination
479 * address is ipnat'ed to a different interface.
480 *
481 * XXX - Checking is incompatible with IP aliases added
482 * to the loopback interface instead of the interface where
483 * the packets are received.
484 *
485 * XXX - This is the case for carp vhost IPs as well so we
486 * insert a workaround. If the packet got here, we already
487 * checked with carp_iamatch() and carp_forus().
488 */
489 checkif = ip_checkinterface && (ipforwarding == 0) &&
490 m->m_pkthdr.rcvif != NULL &&
491 ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) &&
492#ifdef DEV_CARP
493 !m->m_pkthdr.rcvif->if_carp &&
494#endif
495 (dchg == 0);
496
497 /*
498 * Check for exact addresses in the hash bucket.
499 */
500 LIST_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
501 /*
502 * If the address matches, verify that the packet
503 * arrived via the correct interface if checking is
504 * enabled.
505 */
506 if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr &&
507 (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif))
508 goto ours;
509 }
510 /*
511 * Check for broadcast addresses.
512 *
513 * Only accept broadcast packets that arrive via the matching
514 * interface. Reception of forwarded directed broadcasts would
515 * be handled via ip_forward() and ether_output() with the loopback
516 * into the stack for SIMPLEX interfaces handled by ether_output().
517 */
518 if (m->m_pkthdr.rcvif != NULL &&
519 m->m_pkthdr.rcvif->if_flags & IFF_BROADCAST) {
520 TAILQ_FOREACH(ifa, &m->m_pkthdr.rcvif->if_addrhead, ifa_link) {
521 if (ifa->ifa_addr->sa_family != AF_INET)
522 continue;
523 ia = ifatoia(ifa);
524 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
525 ip->ip_dst.s_addr)
526 goto ours;
527 if (ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr)
528 goto ours;
529#ifdef BOOTP_COMPAT
530 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY)
531 goto ours;
532#endif
533 }
534 }
535 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
536 struct in_multi *inm;
537 if (ip_mrouter) {
538 /*
539 * If we are acting as a multicast router, all
540 * incoming multicast packets are passed to the
541 * kernel-level multicast forwarding function.
542 * The packet is returned (relatively) intact; if
543 * ip_mforward() returns a non-zero value, the packet
544 * must be discarded, else it may be accepted below.
545 */
546 if (ip_mforward &&
547 ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) {
548 ipstat.ips_cantforward++;
549 m_freem(m);
550 return;
551 }
552
553 /*
554 * The process-level routing daemon needs to receive
555 * all multicast IGMP packets, whether or not this
556 * host belongs to their destination groups.
557 */
558 if (ip->ip_p == IPPROTO_IGMP)
559 goto ours;
560 ipstat.ips_forward++;
561 }
562 /*
563 * See if we belong to the destination multicast group on the
564 * arrival interface.
565 */
566 IN_MULTI_LOCK();
567 IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm);
568 IN_MULTI_UNLOCK();
569 if (inm == NULL) {
570 ipstat.ips_notmember++;
571 m_freem(m);
572 return;
573 }
574 goto ours;
575 }
576 if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
577 goto ours;
578 if (ip->ip_dst.s_addr == INADDR_ANY)
579 goto ours;
580
581 /*
582 * FAITH(Firewall Aided Internet Translator)
583 */
584 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
585 if (ip_keepfaith) {
586 if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
587 goto ours;
588 }
589 m_freem(m);
590 return;
591 }
592
593 /*
594 * Not for us; forward if possible and desirable.
595 */
596 if (ipforwarding == 0) {
597 ipstat.ips_cantforward++;
598 m_freem(m);
599 } else {
600#if defined(IPSEC) || defined(FAST_IPSEC)
601 if (ip_ipsec_fwd(m))
602 goto bad;
603#endif /* IPSEC */
604 ip_forward(m, dchg);
605 }
606 return;
607
608ours:
609#ifdef IPSTEALTH
610 /*
611 * IPSTEALTH: Process non-routing options only
612 * if the packet is destined for us.
613 */
614 if (ipstealth && hlen > sizeof (struct ip) &&
615 ip_dooptions(m, 1))
616 return;
617#endif /* IPSTEALTH */
618
619 /* Count the packet in the ip address stats */
620 if (ia != NULL) {
621 ia->ia_ifa.if_ipackets++;
622 ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
623 }
624
625 /*
626 * Attempt reassembly; if it succeeds, proceed.
627 * ip_reass() will return a different mbuf.
628 */
629 if (ip->ip_off & (IP_MF | IP_OFFMASK)) {
630 m = ip_reass(m);
631 if (m == NULL)
632 return;
633 ip = mtod(m, struct ip *);
634 /* Get the header length of the reassembled packet */
635 hlen = ip->ip_hl << 2;
636 }
637
638 /*
639 * Further protocols expect the packet length to be w/o the
640 * IP header.
641 */
642 ip->ip_len -= hlen;
643
644#if defined(IPSEC) || defined(FAST_IPSEC)
645 /*
646 * enforce IPsec policy checking if we are seeing last header.
647 * note that we do not visit this with protocols with pcb layer
648 * code - like udp/tcp/raw ip.
649 */
650 if (ip_ipsec_input(m))
651 goto bad;
652#endif /* IPSEC */
653
654 /*
655 * Switch out to protocol's input routine.
656 */
657 ipstat.ips_delivered++;
658
659 (*inetsw[ip_protox[ip->ip_p]].pr_input)(m, hlen);
660 return;
661bad:
662 m_freem(m);
663}
664
665/*
666 * After maxnipq has been updated, propagate the change to UMA. The UMA zone
667 * max has slightly different semantics than the sysctl, for historical
668 * reasons.
669 */
670static void
671maxnipq_update(void)
672{
673
674 /*
675 * -1 for unlimited allocation.
676 */
677 if (maxnipq < 0)
678 uma_zone_set_max(ipq_zone, 0);
679 /*
680 * Positive number for specific bound.
681 */
682 if (maxnipq > 0)
683 uma_zone_set_max(ipq_zone, maxnipq);
684 /*
685 * Zero specifies no further fragment queue allocation -- set the
686 * bound very low, but rely on implementation elsewhere to actually
687 * prevent allocation and reclaim current queues.
688 */
689 if (maxnipq == 0)
690 uma_zone_set_max(ipq_zone, 1);
691}
692
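
maxnipq_update() translates the sysctl's three-way convention (-1 unlimited, 0 disabled, N bounded) into UMA's single one, where a zone max of 0 means "no cap". Isolated as a pure function, the mapping is (an illustrative restatement only; the kernel keeps it inline as above):

static int
maxnipq_to_zone_max(int maxnipq)
{
	if (maxnipq < 0)
		return (0);	/* unlimited: UMA treats 0 as uncapped */
	if (maxnipq == 0)
		return (1);	/* off: keep the zone nearly empty; refusal
				 * and reclaim are enforced elsewhere */
	return (maxnipq);	/* direct administrative bound */
}
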
693static void
694ipq_zone_change(void *tag)
695{
696
697 if (maxnipq > 0 && maxnipq < (nmbclusters / 32)) {
698 maxnipq = nmbclusters / 32;
699 maxnipq_update();
700 }
701}
702
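
ipq_zone_change() is the heart of this revision: registered in ip_init() on the nmbclusters_change event (lines 260-261 above), it raises a positive maxnipq back to the nmbclusters/32 default whenever the cluster limit grows, then re-propagates the bound to UMA via maxnipq_update(). An administrator's explicit -1 (unlimited) or 0 (disabled) setting is deliberately left untouched. The producer side lives outside this file; the code that retunes nmbclusters presumably fires the event roughly like this (a sketch, not the verbatim call site):

	/* After the nmbclusters limit has been raised at runtime... */
	nmbclusters = new_limit;	/* hypothetical new value */
	EVENTHANDLER_INVOKE(nmbclusters_change);	/* runs ipq_zone_change() */
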
690static int
691sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
692{
693 int error, i;
694
695 i = maxnipq;
696 error = sysctl_handle_int(oidp, &i, 0, req);
697 if (error || !req->newptr)
698 return (error);
699
700 /*
701 * XXXRW: Might be a good idea to sanity check the argument and place
702 * an extreme upper bound.
703 */
704 if (i < -1)
705 return (EINVAL);
706 maxnipq = i;
707 maxnipq_update();
708 return (0);
709}
710
711SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
712 NULL, 0, sysctl_maxnipq, "I",
713 "Maximum number of IPv4 fragment reassembly queue entries");
714
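
From user space the handler above is reachable as net.inet.ip.maxfragpackets. A small sketch of driving it programmatically through sysctl(3), equivalent to `sysctl net.inet.ip.maxfragpackets=N` (the wrapper name is ours):

#include <sys/types.h>
#include <sys/sysctl.h>

/* Returns 0 on success; n >= -1, as enforced by sysctl_maxnipq() above. */
static int
set_maxfragpackets(int n)
{
	return (sysctlbyname("net.inet.ip.maxfragpackets",
	    NULL, NULL, &n, sizeof(n)));
}
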
715/*
716 * Take incoming datagram fragment and try to reassemble it into
717 * whole datagram. If the argument is the first fragment or one
718 * in between the function will return NULL and store the mbuf
719 * in the fragment chain. If the argument is the last fragment
720 * the packet will be reassembled and the pointer to the new
721 * mbuf returned for further processing. Only m_tags attached
722 * to the first packet/fragment are preserved.
723 * The IP header is *NOT* adjusted out of iplen.
724 */
725
726struct mbuf *
727ip_reass(struct mbuf *m)
728{
729 struct ip *ip;
730 struct mbuf *p, *q, *nq, *t;
731 struct ipq *fp = NULL;
732 struct ipqhead *head;
733 int i, hlen, next;
734 u_int8_t ecn, ecn0;
735 u_short hash;
736
737 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
738 if (maxnipq == 0 || maxfragsperpacket == 0) {
739 ipstat.ips_fragments++;
740 ipstat.ips_fragdropped++;
741 m_freem(m);
742 return (NULL);
743 }
744
745 ip = mtod(m, struct ip *);
746 hlen = ip->ip_hl << 2;
747
748 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
749 head = &ipq[hash];
750 IPQ_LOCK();
751
752 /*
753 * Look for queue of fragments
754 * of this datagram.
755 */
756 TAILQ_FOREACH(fp, head, ipq_list)
757 if (ip->ip_id == fp->ipq_id &&
758 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
759 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
760#ifdef MAC
761 mac_fragment_match(m, fp) &&
762#endif
763 ip->ip_p == fp->ipq_p)
764 goto found;
765
766 fp = NULL;
767
768 /*
769 * Attempt to trim the number of allocated fragment queues if it
770 * exceeds the administrative limit.
771 */
772 if ((nipq > maxnipq) && (maxnipq > 0)) {
773 /*
774 * drop something from the tail of the current queue
775 * before proceeding further
776 */
777 struct ipq *q = TAILQ_LAST(head, ipqhead);
778 if (q == NULL) { /* gak */
779 for (i = 0; i < IPREASS_NHASH; i++) {
780 struct ipq *r = TAILQ_LAST(&ipq[i], ipqhead);
781 if (r) {
782 ipstat.ips_fragtimeout += r->ipq_nfrags;
783 ip_freef(&ipq[i], r);
784 break;
785 }
786 }
787 } else {
788 ipstat.ips_fragtimeout += q->ipq_nfrags;
789 ip_freef(head, q);
790 }
791 }
792
793found:
794 /*
795 * Adjust ip_len to not reflect header,
796 * convert offset of this to bytes.
797 */
798 ip->ip_len -= hlen;
799 if (ip->ip_off & IP_MF) {
800 /*
801 * Make sure that fragments have a data length
802 * that's a non-zero multiple of 8 bytes.
803 */
804 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
805 ipstat.ips_toosmall++; /* XXX */
806 goto dropfrag;
807 }
808 m->m_flags |= M_FRAG;
809 } else
810 m->m_flags &= ~M_FRAG;
811 ip->ip_off <<= 3;
812
813
814 /*
815 * Attempt reassembly; if it succeeds, proceed.
816 * ip_reass() will return a different mbuf.
817 */
818 ipstat.ips_fragments++;
819 m->m_pkthdr.header = ip;
820
821 /* Previous ip_reass() started here. */
822 /*
823 * Presence of header sizes in mbufs
824 * would confuse code below.
825 */
826 m->m_data += hlen;
827 m->m_len -= hlen;
828
829 /*
830 * If first fragment to arrive, create a reassembly queue.
831 */
832 if (fp == NULL) {
833 fp = uma_zalloc(ipq_zone, M_NOWAIT);
834 if (fp == NULL)
835 goto dropfrag;
836#ifdef MAC
837 if (mac_init_ipq(fp, M_NOWAIT) != 0) {
838 uma_zfree(ipq_zone, fp);
839 goto dropfrag;
840 }
841 mac_create_ipq(m, fp);
842#endif
843 TAILQ_INSERT_HEAD(head, fp, ipq_list);
844 nipq++;
845 fp->ipq_nfrags = 1;
846 fp->ipq_ttl = IPFRAGTTL;
847 fp->ipq_p = ip->ip_p;
848 fp->ipq_id = ip->ip_id;
849 fp->ipq_src = ip->ip_src;
850 fp->ipq_dst = ip->ip_dst;
851 fp->ipq_frags = m;
852 m->m_nextpkt = NULL;
853 goto done;
854 } else {
855 fp->ipq_nfrags++;
856#ifdef MAC
857 mac_update_ipq(m, fp);
858#endif
859 }
860
861#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
862
863 /*
864 * Handle ECN by comparing this segment with the first one;
865 * if CE is set, do not lose CE.
866 * drop if CE and not-ECT are mixed for the same packet.
867 */
868 ecn = ip->ip_tos & IPTOS_ECN_MASK;
869 ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
870 if (ecn == IPTOS_ECN_CE) {
871 if (ecn0 == IPTOS_ECN_NOTECT)
872 goto dropfrag;
873 if (ecn0 != IPTOS_ECN_CE)
874 GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
875 }
876 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
877 goto dropfrag;
878
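
The two tests above implement ECN-safe reassembly: a congestion-experienced (CE) mark is never lost, and a datagram that mixes not-ECT fragments with ECT/CE fragments is dropped rather than guessed at. The same decision table as a standalone function, with the codepoint constants spelled out as in netinet/ip.h (illustrative only):

#define IPTOS_ECN_NOTECT	0x00	/* not ECN-capable transport */
#define IPTOS_ECN_CE		0x03	/* congestion experienced */

/*
 * Returns -1 to drop the new fragment, else the merged codepoint to
 * store on the first fragment.
 */
static int
ecn_merge(int ecn0, int ecn)	/* ecn0: first fragment; ecn: new one */
{
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			return (-1);		/* CE vs. not-ECT: drop */
		return (IPTOS_ECN_CE);		/* propagate CE */
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		return (-1);			/* not-ECT vs. ECT: drop */
	return (ecn0);
}
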
879 /*
880 * Find a segment which begins after this one does.
881 */
882 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
883 if (GETIP(q)->ip_off > ip->ip_off)
884 break;
885
886 /*
887 * If there is a preceding segment, it may provide some of
888 * our data already. If so, drop the data from the incoming
889 * segment. If it provides all of our data, drop us, otherwise
890 * stick new segment in the proper place.
891 *
 892 * If some of the data is dropped from the preceding
 893 * segment, then its checksum is invalidated.
894 */
895 if (p) {
896 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
897 if (i > 0) {
898 if (i >= ip->ip_len)
899 goto dropfrag;
900 m_adj(m, i);
901 m->m_pkthdr.csum_flags = 0;
902 ip->ip_off += i;
903 ip->ip_len -= i;
904 }
905 m->m_nextpkt = p->m_nextpkt;
906 p->m_nextpkt = m;
907 } else {
908 m->m_nextpkt = fp->ipq_frags;
909 fp->ipq_frags = m;
910 }
911
912 /*
913 * While we overlap succeeding segments trim them or,
914 * if they are completely covered, dequeue them.
915 */
916 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
917 q = nq) {
918 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
919 if (i < GETIP(q)->ip_len) {
920 GETIP(q)->ip_len -= i;
921 GETIP(q)->ip_off += i;
922 m_adj(q, i);
923 q->m_pkthdr.csum_flags = 0;
924 break;
925 }
926 nq = q->m_nextpkt;
927 m->m_nextpkt = nq;
928 ipstat.ips_fragdropped++;
929 fp->ipq_nfrags--;
930 m_freem(q);
931 }
932
933 /*
934 * Check for complete reassembly and perform frag per packet
935 * limiting.
936 *
937 * Frag limiting is performed here so that the nth frag has
938 * a chance to complete the packet before we drop the packet.
939 * As a result, n+1 frags are actually allowed per packet, but
940 * only n will ever be stored. (n = maxfragsperpacket.)
941 *
942 */
943 next = 0;
944 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
945 if (GETIP(q)->ip_off != next) {
946 if (fp->ipq_nfrags > maxfragsperpacket) {
947 ipstat.ips_fragdropped += fp->ipq_nfrags;
948 ip_freef(head, fp);
949 }
950 goto done;
951 }
952 next += GETIP(q)->ip_len;
953 }
954 /* Make sure the last packet didn't have the IP_MF flag */
955 if (p->m_flags & M_FRAG) {
956 if (fp->ipq_nfrags > maxfragsperpacket) {
957 ipstat.ips_fragdropped += fp->ipq_nfrags;
958 ip_freef(head, fp);
959 }
960 goto done;
961 }
962
963 /*
964 * Reassembly is complete. Make sure the packet is a sane size.
965 */
966 q = fp->ipq_frags;
967 ip = GETIP(q);
968 if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
969 ipstat.ips_toolong++;
970 ipstat.ips_fragdropped += fp->ipq_nfrags;
971 ip_freef(head, fp);
972 goto done;
973 }
974
975 /*
976 * Concatenate fragments.
977 */
978 m = q;
979 t = m->m_next;
980 m->m_next = NULL;
981 m_cat(m, t);
982 nq = q->m_nextpkt;
983 q->m_nextpkt = NULL;
984 for (q = nq; q != NULL; q = nq) {
985 nq = q->m_nextpkt;
986 q->m_nextpkt = NULL;
987 m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
988 m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
989 m_cat(m, q);
990 }
991 /*
992 * In order to do checksumming faster we do 'end-around carry' here
993 * (and not in for{} loop), though it implies we are not going to
994 * reassemble more than 64k fragments.
995 */
996 m->m_pkthdr.csum_data =
997 (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
998#ifdef MAC
999 mac_create_datagram_from_ipq(fp, m);
1000 mac_destroy_ipq(fp);
1001#endif
1002
1003 /*
1004 * Create header for new ip packet by modifying header of first
1005 * packet; dequeue and discard fragment reassembly header.
1006 * Make header visible.
1007 */
1008 ip->ip_len = (ip->ip_hl << 2) + next;
1009 ip->ip_src = fp->ipq_src;
1010 ip->ip_dst = fp->ipq_dst;
1011 TAILQ_REMOVE(head, fp, ipq_list);
1012 nipq--;
1013 uma_zfree(ipq_zone, fp);
1014 m->m_len += (ip->ip_hl << 2);
1015 m->m_data -= (ip->ip_hl << 2);
1016 /* some debugging cruft by sklower, below, will go away soon */
1017 if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
1018 m_fixhdr(m);
1019 ipstat.ips_reassembled++;
1020 IPQ_UNLOCK();
1021 return (m);
1022
1023dropfrag:
1024 ipstat.ips_fragdropped++;
1025 if (fp != NULL)
1026 fp->ipq_nfrags--;
1027 m_freem(m);
1028done:
1029 IPQ_UNLOCK();
1030 return (NULL);
1031
1032#undef GETIP
1033}
1034
1035/*
1036 * Free a fragment reassembly header and all
1037 * associated datagrams.
1038 */
1039static void
1040ip_freef(fhp, fp)
1041 struct ipqhead *fhp;
1042 struct ipq *fp;
1043{
1044 register struct mbuf *q;
1045
1046 IPQ_LOCK_ASSERT();
1047
1048 while (fp->ipq_frags) {
1049 q = fp->ipq_frags;
1050 fp->ipq_frags = q->m_nextpkt;
1051 m_freem(q);
1052 }
1053 TAILQ_REMOVE(fhp, fp, ipq_list);
1054 uma_zfree(ipq_zone, fp);
1055 nipq--;
1056}
1057
1058/*
1059 * IP timer processing;
1060 * if a timer expires on a reassembly
1061 * queue, discard it.
1062 */
1063void
1064ip_slowtimo()
1065{
1066 register struct ipq *fp;
1067 int i;
1068
1069 IPQ_LOCK();
1070 for (i = 0; i < IPREASS_NHASH; i++) {
1071 for(fp = TAILQ_FIRST(&ipq[i]); fp;) {
1072 struct ipq *fpp;
1073
1074 fpp = fp;
1075 fp = TAILQ_NEXT(fp, ipq_list);
1076 if(--fpp->ipq_ttl == 0) {
1077 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
1078 ip_freef(&ipq[i], fpp);
1079 }
1080 }
1081 }
1082 /*
1083 * If we are over the maximum number of fragments
1084 * (due to the limit being lowered), drain off
1085 * enough to get down to the new limit.
1086 */
1087 if (maxnipq >= 0 && nipq > maxnipq) {
1088 for (i = 0; i < IPREASS_NHASH; i++) {
1089 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i])) {
1090 ipstat.ips_fragdropped +=
1091 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
1092 ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
1093 }
1094 }
1095 }
1096 IPQ_UNLOCK();
1097}
1098
1099/*
703static int
704sysctl_maxnipq(SYSCTL_HANDLER_ARGS)
705{
706 int error, i;
707
708 i = maxnipq;
709 error = sysctl_handle_int(oidp, &i, 0, req);
710 if (error || !req->newptr)
711 return (error);
712
713 /*
714 * XXXRW: Might be a good idea to sanity check the argument and place
715 * an extreme upper bound.
716 */
717 if (i < -1)
718 return (EINVAL);
719 maxnipq = i;
720 maxnipq_update();
721 return (0);
722}
723
724SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLTYPE_INT|CTLFLAG_RW,
725 NULL, 0, sysctl_maxnipq, "I",
726 "Maximum number of IPv4 fragment reassembly queue entries");
727
728/*
729 * Take an incoming datagram fragment and try to reassemble it into
730 * a whole datagram. If the argument is the first fragment or one
731 * in between, the function will return NULL and store the mbuf
732 * in the fragment chain. If the argument is the last fragment,
733 * the packet will be reassembled and the pointer to the new
734 * mbuf returned for further processing. Only m_tags attached
735 * to the first packet/fragment are preserved.
736 * The IP header is *NOT* adjusted out of iplen.
737 */
738
739struct mbuf *
740ip_reass(struct mbuf *m)
741{
742 struct ip *ip;
743 struct mbuf *p, *q, *nq, *t;
744 struct ipq *fp = NULL;
745 struct ipqhead *head;
746 int i, hlen, next;
747 u_int8_t ecn, ecn0;
748 u_short hash;
749
750 /* If maxnipq or maxfragsperpacket are 0, never accept fragments. */
751 if (maxnipq == 0 || maxfragsperpacket == 0) {
752 ipstat.ips_fragments++;
753 ipstat.ips_fragdropped++;
754 m_freem(m);
755 return (NULL);
756 }
757
758 ip = mtod(m, struct ip *);
759 hlen = ip->ip_hl << 2;
760
761 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
762 head = &ipq[hash];
763 IPQ_LOCK();
764
765 /*
766 * Look for queue of fragments
767 * of this datagram.
768 */
769 TAILQ_FOREACH(fp, head, ipq_list)
770 if (ip->ip_id == fp->ipq_id &&
771 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
772 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
773#ifdef MAC
774 mac_fragment_match(m, fp) &&
775#endif
776 ip->ip_p == fp->ipq_p)
777 goto found;
778
779 fp = NULL;
780
781 /*
782 * Attempt to trim the number of allocated fragment queues if it
783 * exceeds the administrative limit.
784 */
785 if ((nipq > maxnipq) && (maxnipq > 0)) {
786 /*
787		 * Drop something from the tail of the current queue
788		 * before proceeding further.
789 */
790 struct ipq *q = TAILQ_LAST(head, ipqhead);
791 if (q == NULL) { /* gak */
792 for (i = 0; i < IPREASS_NHASH; i++) {
793 struct ipq *r = TAILQ_LAST(&ipq[i], ipqhead);
794 if (r) {
795 ipstat.ips_fragtimeout += r->ipq_nfrags;
796 ip_freef(&ipq[i], r);
797 break;
798 }
799 }
800 } else {
801 ipstat.ips_fragtimeout += q->ipq_nfrags;
802 ip_freef(head, q);
803 }
804 }
805
806found:
807 /*
808	 * Adjust ip_len so it does not include the header,
809	 * and convert this fragment's offset to bytes.
810 */
811 ip->ip_len -= hlen;
812 if (ip->ip_off & IP_MF) {
813 /*
814 * Make sure that fragments have a data length
815 * that's a non-zero multiple of 8 bytes.
816 */
817 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
818 ipstat.ips_toosmall++; /* XXX */
819 goto dropfrag;
820 }
821 m->m_flags |= M_FRAG;
822 } else
823 m->m_flags &= ~M_FRAG;
824 ip->ip_off <<= 3;
825
826
827 /*
828 * Attempt reassembly; if it succeeds, proceed.
829 * ip_reass() will return a different mbuf.
830 */
831 ipstat.ips_fragments++;
832 m->m_pkthdr.header = ip;
833
834 /* Previous ip_reass() started here. */
835 /*
836	 * The presence of the header bytes in the mbuf
837	 * would confuse the code below.
838 */
839 m->m_data += hlen;
840 m->m_len -= hlen;
841
842 /*
843 * If first fragment to arrive, create a reassembly queue.
844 */
845 if (fp == NULL) {
846 fp = uma_zalloc(ipq_zone, M_NOWAIT);
847 if (fp == NULL)
848 goto dropfrag;
849#ifdef MAC
850 if (mac_init_ipq(fp, M_NOWAIT) != 0) {
851 uma_zfree(ipq_zone, fp);
852 goto dropfrag;
853 }
854 mac_create_ipq(m, fp);
855#endif
856 TAILQ_INSERT_HEAD(head, fp, ipq_list);
857 nipq++;
858 fp->ipq_nfrags = 1;
859 fp->ipq_ttl = IPFRAGTTL;
860 fp->ipq_p = ip->ip_p;
861 fp->ipq_id = ip->ip_id;
862 fp->ipq_src = ip->ip_src;
863 fp->ipq_dst = ip->ip_dst;
864 fp->ipq_frags = m;
865 m->m_nextpkt = NULL;
866 goto done;
867 } else {
868 fp->ipq_nfrags++;
869#ifdef MAC
870 mac_update_ipq(m, fp);
871#endif
872 }
873
874#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
875
876 /*
877 * Handle ECN by comparing this segment with the first one;
878 * if CE is set, do not lose CE.
879	 * Drop the fragment if CE and not-ECT are mixed for the same packet.
880 */
881 ecn = ip->ip_tos & IPTOS_ECN_MASK;
882 ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
883 if (ecn == IPTOS_ECN_CE) {
884 if (ecn0 == IPTOS_ECN_NOTECT)
885 goto dropfrag;
886 if (ecn0 != IPTOS_ECN_CE)
887 GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
888 }
889 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
890 goto dropfrag;
891
892 /*
893 * Find a segment which begins after this one does.
894 */
895 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
896 if (GETIP(q)->ip_off > ip->ip_off)
897 break;
898
899 /*
900 * If there is a preceding segment, it may provide some of
901 * our data already. If so, drop the data from the incoming
902	 * segment. If it provides all of our data, drop us; otherwise
903	 * stick the new segment in the proper place.
904	 *
905	 * If some of the data is dropped from the preceding
906	 * segment, then its checksum is invalidated.
907 */
908 if (p) {
909 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
910 if (i > 0) {
911 if (i >= ip->ip_len)
912 goto dropfrag;
913 m_adj(m, i);
914 m->m_pkthdr.csum_flags = 0;
915 ip->ip_off += i;
916 ip->ip_len -= i;
917 }
918 m->m_nextpkt = p->m_nextpkt;
919 p->m_nextpkt = m;
920 } else {
921 m->m_nextpkt = fp->ipq_frags;
922 fp->ipq_frags = m;
923 }
924
925 /*
926	 * While we overlap succeeding segments, trim them or,
927 * if they are completely covered, dequeue them.
928 */
929 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
930 q = nq) {
931 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
932 if (i < GETIP(q)->ip_len) {
933 GETIP(q)->ip_len -= i;
934 GETIP(q)->ip_off += i;
935 m_adj(q, i);
936 q->m_pkthdr.csum_flags = 0;
937 break;
938 }
939 nq = q->m_nextpkt;
940 m->m_nextpkt = nq;
941 ipstat.ips_fragdropped++;
942 fp->ipq_nfrags--;
943 m_freem(q);
944 }
945
946 /*
947 * Check for complete reassembly and perform frag per packet
948 * limiting.
949 *
950 * Frag limiting is performed here so that the nth frag has
951 * a chance to complete the packet before we drop the packet.
952 * As a result, n+1 frags are actually allowed per packet, but
953 * only n will ever be stored. (n = maxfragsperpacket.)
954 *
955 */
956 next = 0;
957 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
958 if (GETIP(q)->ip_off != next) {
959 if (fp->ipq_nfrags > maxfragsperpacket) {
960 ipstat.ips_fragdropped += fp->ipq_nfrags;
961 ip_freef(head, fp);
962 }
963 goto done;
964 }
965 next += GETIP(q)->ip_len;
966 }
967	/* Make sure the last fragment didn't have the IP_MF flag */
968 if (p->m_flags & M_FRAG) {
969 if (fp->ipq_nfrags > maxfragsperpacket) {
970 ipstat.ips_fragdropped += fp->ipq_nfrags;
971 ip_freef(head, fp);
972 }
973 goto done;
974 }
975
976 /*
977 * Reassembly is complete. Make sure the packet is a sane size.
978 */
979 q = fp->ipq_frags;
980 ip = GETIP(q);
981 if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
982 ipstat.ips_toolong++;
983 ipstat.ips_fragdropped += fp->ipq_nfrags;
984 ip_freef(head, fp);
985 goto done;
986 }
987
988 /*
989 * Concatenate fragments.
990 */
991 m = q;
992 t = m->m_next;
993 m->m_next = NULL;
994 m_cat(m, t);
995 nq = q->m_nextpkt;
996 q->m_nextpkt = NULL;
997 for (q = nq; q != NULL; q = nq) {
998 nq = q->m_nextpkt;
999 q->m_nextpkt = NULL;
1000 m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
1001 m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
1002 m_cat(m, q);
1003 }
1004 /*
1005	 * In order to do checksumming faster we do the 'end-around carry'
1006	 * here (and not in the for{} loop above), though this implies we
1007	 * are not going to reassemble more than 64k fragments.
1008 */
1009 m->m_pkthdr.csum_data =
1010 (m->m_pkthdr.csum_data & 0xffff) + (m->m_pkthdr.csum_data >> 16);
1011#ifdef MAC
1012 mac_create_datagram_from_ipq(fp, m);
1013 mac_destroy_ipq(fp);
1014#endif
1015
1016 /*
1017 * Create header for new ip packet by modifying header of first
1018 * packet; dequeue and discard fragment reassembly header.
1019 * Make header visible.
1020 */
1021 ip->ip_len = (ip->ip_hl << 2) + next;
1022 ip->ip_src = fp->ipq_src;
1023 ip->ip_dst = fp->ipq_dst;
1024 TAILQ_REMOVE(head, fp, ipq_list);
1025 nipq--;
1026 uma_zfree(ipq_zone, fp);
1027 m->m_len += (ip->ip_hl << 2);
1028 m->m_data -= (ip->ip_hl << 2);
1029 /* some debugging cruft by sklower, below, will go away soon */
1030 if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */
1031 m_fixhdr(m);
1032 ipstat.ips_reassembled++;
1033 IPQ_UNLOCK();
1034 return (m);
1035
1036dropfrag:
1037 ipstat.ips_fragdropped++;
1038 if (fp != NULL)
1039 fp->ipq_nfrags--;
1040 m_freem(m);
1041done:
1042 IPQ_UNLOCK();
1043 return (NULL);
1044
1045#undef GETIP
1046}
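/*
 * A minimal standalone sketch, not part of this file, of the
 * 'end-around carry' fold performed at the end of ip_reass() above:
 * 16-bit one's-complement partial sums accumulate in a 32-bit word
 * and the carries are folded back into the low 16 bits.  The kernel
 * does a single fold (see the comment there); a generic helper folds
 * twice so the result is guaranteed to fit in 16 bits.
 */
#if 0
#include <stdint.h>

static uint16_t
csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries once */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb carry of the fold */
	return ((uint16_t)sum);
}
#endif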
1047
1048/*
1049 * Free a fragment reassembly header and all
1050 * associated datagrams.
1051 */
1052static void
1053ip_freef(fhp, fp)
1054 struct ipqhead *fhp;
1055 struct ipq *fp;
1056{
1057 register struct mbuf *q;
1058
1059 IPQ_LOCK_ASSERT();
1060
1061 while (fp->ipq_frags) {
1062 q = fp->ipq_frags;
1063 fp->ipq_frags = q->m_nextpkt;
1064 m_freem(q);
1065 }
1066 TAILQ_REMOVE(fhp, fp, ipq_list);
1067 uma_zfree(ipq_zone, fp);
1068 nipq--;
1069}
1070
1071/*
1072 * IP timer processing;
1073 * if a timer expires on a reassembly
1074 * queue, discard it.
1075 */
1076void
1077ip_slowtimo()
1078{
1079 register struct ipq *fp;
1080 int i;
1081
1082 IPQ_LOCK();
1083 for (i = 0; i < IPREASS_NHASH; i++) {
1084 for(fp = TAILQ_FIRST(&ipq[i]); fp;) {
1085 struct ipq *fpp;
1086
1087 fpp = fp;
1088 fp = TAILQ_NEXT(fp, ipq_list);
1089 if(--fpp->ipq_ttl == 0) {
1090 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
1091 ip_freef(&ipq[i], fpp);
1092 }
1093 }
1094 }
1095 /*
1096 * If we are over the maximum number of fragments
1097 * (due to the limit being lowered), drain off
1098 * enough to get down to the new limit.
1099 */
1100 if (maxnipq >= 0 && nipq > maxnipq) {
1101 for (i = 0; i < IPREASS_NHASH; i++) {
1102 while (nipq > maxnipq && !TAILQ_EMPTY(&ipq[i])) {
1103 ipstat.ips_fragdropped +=
1104 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
1105 ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
1106 }
1107 }
1108 }
1109 IPQ_UNLOCK();
1110}
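/*
 * Timing note, for illustration: ip_slowtimo() is driven from the
 * protocol slow timeout at PR_SLOWHZ (2) ticks per second, so with
 * the historical IPFRAGTTL value of 60 an incomplete reassembly
 * queue is reaped after roughly 30 seconds.
 */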
1111
1112/*
1113 * Drain off all datagram fragments.
1114 */
1115void
1116ip_drain()
1117{
1118 int i;
1119
1120 IPQ_LOCK();
1121 for (i = 0; i < IPREASS_NHASH; i++) {
1122 while(!TAILQ_EMPTY(&ipq[i])) {
1123 ipstat.ips_fragdropped +=
1124 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
1125 ip_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
1126 }
1127 }
1128 IPQ_UNLOCK();
1129 in_rtqdrain();
1130}
1131
1132/*
1133 * The protocol to be inserted into ip_protox[] must be already registered
1134 * in inetsw[], either statically or through pf_proto_register().
1135 */
1136int
1137ipproto_register(u_char ipproto)
1138{
1139 struct protosw *pr;
1140
1141 /* Sanity checks. */
1142 if (ipproto == 0)
1143 return (EPROTONOSUPPORT);
1144
1145 /*
1146 * The protocol slot must not be occupied by another protocol
1147 * already. An index pointing to IPPROTO_RAW is unused.
1148 */
1149 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
1150 if (pr == NULL)
1151 return (EPFNOSUPPORT);
1152 if (ip_protox[ipproto] != pr - inetsw) /* IPPROTO_RAW */
1153 return (EEXIST);
1154
1155 /* Find the protocol position in inetsw[] and set the index. */
1156 for (pr = inetdomain.dom_protosw;
1157 pr < inetdomain.dom_protoswNPROTOSW; pr++) {
1158 if (pr->pr_domain->dom_family == PF_INET &&
1159 pr->pr_protocol && pr->pr_protocol == ipproto) {
1160 /* Be careful to only index valid IP protocols. */
1161 if (pr->pr_protocol < IPPROTO_MAX) {
1162 ip_protox[pr->pr_protocol] = pr - inetsw;
1163 return (0);
1164 } else
1165 return (EINVAL);
1166 }
1167 }
1168 return (EPROTONOSUPPORT);
1169}
1170
1171int
1172ipproto_unregister(u_char ipproto)
1173{
1174 struct protosw *pr;
1175
1176 /* Sanity checks. */
1177 if (ipproto == 0)
1178 return (EPROTONOSUPPORT);
1179
1180 /* Check if the protocol was indeed registered. */
1181 pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
1182 if (pr == NULL)
1183 return (EPFNOSUPPORT);
1184 if (ip_protox[ipproto] == pr - inetsw) /* IPPROTO_RAW */
1185 return (ENOENT);
1186
1187 /* Reset the protocol slot to IPPROTO_RAW. */
1188 ip_protox[ipproto] = pr - inetsw;
1189 return (0);
1190}
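/*
 * Hypothetical sketch, not part of this file: how a kernel module
 * might combine pf_proto_register() with ipproto_register() above to
 * claim an IP protocol number.  The protosw entry, the handler
 * foo_input() and the choice of protocol 253 (an RFC 3692
 * experimental value) are all invented for illustration.
 */
#if 0
static struct protosw foosw = {
	.pr_type =	SOCK_RAW,
	.pr_domain =	&inetdomain,
	.pr_protocol =	253,
	.pr_flags =	PR_ATOMIC|PR_ADDR,
	.pr_input =	foo_input,
	.pr_usrreqs =	&rip_usrreqs
};

static int
foo_register(void)
{
	int error;

	/* Make the protosw known, then point ip_protox[] at it. */
	error = pf_proto_register(PF_INET, &foosw);
	if (error != 0)
		return (error);
	return (ipproto_register(253));
}
#endif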
1191
1192/*
1193 * Given address of next destination (final or next hop),
1194 * return internet address info of interface to be used to get there.
1195 */
1196struct in_ifaddr *
1197ip_rtaddr(dst)
1198 struct in_addr dst;
1199{
1200 struct route sro;
1201 struct sockaddr_in *sin;
1202 struct in_ifaddr *ifa;
1203
1204 bzero(&sro, sizeof(sro));
1205 sin = (struct sockaddr_in *)&sro.ro_dst;
1206 sin->sin_family = AF_INET;
1207 sin->sin_len = sizeof(*sin);
1208 sin->sin_addr = dst;
1209 rtalloc_ign(&sro, RTF_CLONING);
1210
1211 if (sro.ro_rt == NULL)
1212 return (NULL);
1213
1214 ifa = ifatoia(sro.ro_rt->rt_ifa);
1215 RTFREE(sro.ro_rt);
1216 return (ifa);
1217}
1218
1219u_char inetctlerrmap[PRC_NCMDS] = {
1220 0, 0, 0, 0,
1221 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
1222 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
1223 EMSGSIZE, EHOSTUNREACH, 0, 0,
1224 0, 0, EHOSTUNREACH, 0,
1225 ENOPROTOOPT, ECONNREFUSED
1226};
1227
1228/*
1229 * Forward a packet. If some error occurs, return the sender
1230 * an ICMP packet. Note we can't always generate a meaningful
1231 * ICMP message because ICMP doesn't have a large enough repertoire
1232 * of codes and types.
1233 *
1234 * If not forwarding, just drop the packet. This could be confusing
1235 * if ipforwarding was zero but some routing protocol was advancing
1236 * us as a gateway to somewhere. However, we must let the routing
1237 * protocol deal with that.
1238 *
1239 * The srcrt parameter indicates whether the packet is being forwarded
1240 * via a source route.
1241 */
1242void
1243ip_forward(struct mbuf *m, int srcrt)
1244{
1245 struct ip *ip = mtod(m, struct ip *);
1246 struct in_ifaddr *ia = NULL;
1247 struct mbuf *mcopy;
1248 struct in_addr dest;
1249 int error, type = 0, code = 0, mtu = 0;
1250
1251 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(ip->ip_dst) == 0) {
1252 ipstat.ips_cantforward++;
1253 m_freem(m);
1254 return;
1255 }
1256#ifdef IPSTEALTH
1257 if (!ipstealth) {
1258#endif
1259 if (ip->ip_ttl <= IPTTLDEC) {
1260 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
1261 0, 0);
1262 return;
1263 }
1264#ifdef IPSTEALTH
1265 }
1266#endif
1267
1268 if (!srcrt && (ia = ip_rtaddr(ip->ip_dst)) == NULL) {
1269 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, 0, 0);
1270 return;
1271 }
1272
1273 /*
1274 * Save the IP header and at most 8 bytes of the payload,
1275 * in case we need to generate an ICMP message to the src.
1276 *
1277 * XXX this can be optimized a lot by saving the data in a local
1278 * buffer on the stack (72 bytes at most), and only allocating the
1279 * mbuf if really necessary. The vast majority of the packets
1280 * are forwarded without having to send an ICMP back (either
1281	 * because unnecessary, or because rate limited), so we
1282	 * are really wasting a lot of work here.
1283 *
1284 * We don't use m_copy() because it might return a reference
1285 * to a shared cluster. Both this function and ip_output()
1286 * assume exclusive access to the IP header in `m', so any
1287 * data in a cluster may change before we reach icmp_error().
1288 */
1289 MGETHDR(mcopy, M_DONTWAIT, m->m_type);
1290 if (mcopy != NULL && !m_dup_pkthdr(mcopy, m, M_DONTWAIT)) {
1291 /*
1292 * It's probably ok if the pkthdr dup fails (because
1293 * the deep copy of the tag chain failed), but for now
1294 * be conservative and just discard the copy since
1295 * code below may some day want the tags.
1296 */
1297 m_free(mcopy);
1298 mcopy = NULL;
1299 }
1300 if (mcopy != NULL) {
1301 mcopy->m_len = min(ip->ip_len, M_TRAILINGSPACE(mcopy));
1302 mcopy->m_pkthdr.len = mcopy->m_len;
1303 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
1304 }
1305
1306#ifdef IPSTEALTH
1307 if (!ipstealth) {
1308#endif
1309 ip->ip_ttl -= IPTTLDEC;
1310#ifdef IPSTEALTH
1311 }
1312#endif
1313
1314 /*
1315 * If forwarding a packet using the same interface it came in on,
1316 * perhaps we should send a redirect to the sender to shortcut a hop.
1317 * Only send redirect if source is sending directly to us,
1318 * and if packet was not source routed (or has any options).
1319 * Also, don't send redirect if forwarding using a default route
1320 * or a route modified by a redirect.
1321 */
1322 dest.s_addr = 0;
1323 if (!srcrt && ipsendredirects && ia->ia_ifp == m->m_pkthdr.rcvif) {
1324 struct sockaddr_in *sin;
1325 struct route ro;
1326 struct rtentry *rt;
1327
1328 bzero(&ro, sizeof(ro));
1329 sin = (struct sockaddr_in *)&ro.ro_dst;
1330 sin->sin_family = AF_INET;
1331 sin->sin_len = sizeof(*sin);
1332 sin->sin_addr = ip->ip_dst;
1333 rtalloc_ign(&ro, RTF_CLONING);
1334
1335 rt = ro.ro_rt;
1336
1337 if (rt && (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
1338 satosin(rt_key(rt))->sin_addr.s_addr != 0) {
1339#define RTA(rt) ((struct in_ifaddr *)(rt->rt_ifa))
1340 u_long src = ntohl(ip->ip_src.s_addr);
1341
1342 if (RTA(rt) &&
1343 (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
1344 if (rt->rt_flags & RTF_GATEWAY)
1345 dest.s_addr = satosin(rt->rt_gateway)->sin_addr.s_addr;
1346 else
1347 dest.s_addr = ip->ip_dst.s_addr;
1348				/* Router requirements say to send only host redirects */
1349 type = ICMP_REDIRECT;
1350 code = ICMP_REDIRECT_HOST;
1351 }
1352 }
1353 if (rt)
1354 RTFREE(rt);
1355 }
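	/*
	 * Worked example for the subnet test above, with invented
	 * addresses: for a source of 192.0.2.77 arriving on an
	 * interface numbered out of 192.0.2.0/24, src is 0xc000024d,
	 * ia_subnetmask is 0xffffff00 and ia_subnet is 0xc0000200;
	 * (src & mask) equals the subnet, so the sender is on-link
	 * and a host redirect may be generated.
	 */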
1356
1357 error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
1358 if (error)
1359 ipstat.ips_cantforward++;
1360 else {
1361 ipstat.ips_forward++;
1362 if (type)
1363 ipstat.ips_redirectsent++;
1364 else {
1365 if (mcopy)
1366 m_freem(mcopy);
1367 return;
1368 }
1369 }
1370 if (mcopy == NULL)
1371 return;
1372
1373 switch (error) {
1374
1375 case 0: /* forwarded, but need redirect */
1376 /* type, code set above */
1377 break;
1378
1379 case ENETUNREACH: /* shouldn't happen, checked above */
1380 case EHOSTUNREACH:
1381 case ENETDOWN:
1382 case EHOSTDOWN:
1383 default:
1384 type = ICMP_UNREACH;
1385 code = ICMP_UNREACH_HOST;
1386 break;
1387
1388 case EMSGSIZE:
1389 type = ICMP_UNREACH;
1390 code = ICMP_UNREACH_NEEDFRAG;
1391
1392#if defined(IPSEC) || defined(FAST_IPSEC)
1393 mtu = ip_ipsec_mtu(m);
1394#endif /* IPSEC */
1395 /*
1396		 * If the MTU wasn't set before, use the interface MTU or
1397 * fall back to the next smaller mtu step compared to the
1398 * current packet size.
1399 */
1400 if (mtu == 0) {
1401 if (ia != NULL)
1402 mtu = ia->ia_ifp->if_mtu;
1403 else
1404 mtu = ip_next_mtu(ip->ip_len, 0);
1405 }
1406 ipstat.ips_cantfrag++;
1407 break;
1408
1409 case ENOBUFS:
1410		 * A router should not generate ICMP_SOURCEQUENCH, per
1411		 * RFC 1812, Requirements for IP Version 4 Routers.
1412 * required in RFC1812 Requirements for IP Version 4 Routers.
1413 * Source quench could be a big problem under DoS attacks,
1414 * or if the underlying interface is rate-limited.
1415 * Those who need source quench packets may re-enable them
1416 * via the net.inet.ip.sendsourcequench sysctl.
1417 */
1418 if (ip_sendsourcequench == 0) {
1419 m_freem(mcopy);
1420 return;
1421 } else {
1422 type = ICMP_SOURCEQUENCH;
1423 code = 0;
1424 }
1425 break;
1426
1427 case EACCES: /* ipfw denied packet */
1428 m_freem(mcopy);
1429 return;
1430 }
1431 icmp_error(mcopy, type, code, dest.s_addr, mtu);
1432}
1433
1434void
1435ip_savecontrol(inp, mp, ip, m)
1436 register struct inpcb *inp;
1437 register struct mbuf **mp;
1438 register struct ip *ip;
1439 register struct mbuf *m;
1440{
1441 if (inp->inp_socket->so_options & (SO_BINTIME | SO_TIMESTAMP)) {
1442 struct bintime bt;
1443
1444 bintime(&bt);
1445 if (inp->inp_socket->so_options & SO_BINTIME) {
1446 *mp = sbcreatecontrol((caddr_t) &bt, sizeof(bt),
1447 SCM_BINTIME, SOL_SOCKET);
1448 if (*mp)
1449 mp = &(*mp)->m_next;
1450 }
1451 if (inp->inp_socket->so_options & SO_TIMESTAMP) {
1452 struct timeval tv;
1453
1454 bintime2timeval(&bt, &tv);
1455 *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
1456 SCM_TIMESTAMP, SOL_SOCKET);
1457 if (*mp)
1458 mp = &(*mp)->m_next;
1459 }
1460 }
1461 if (inp->inp_flags & INP_RECVDSTADDR) {
1462 *mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
1463 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
1464 if (*mp)
1465 mp = &(*mp)->m_next;
1466 }
1467 if (inp->inp_flags & INP_RECVTTL) {
1468 *mp = sbcreatecontrol((caddr_t) &ip->ip_ttl,
1469 sizeof(u_char), IP_RECVTTL, IPPROTO_IP);
1470 if (*mp)
1471 mp = &(*mp)->m_next;
1472 }
1473#ifdef notyet
1474 /* XXX
1475 * Moving these out of udp_input() made them even more broken
1476 * than they already were.
1477 */
1478 /* options were tossed already */
1479 if (inp->inp_flags & INP_RECVOPTS) {
1480 *mp = sbcreatecontrol((caddr_t) opts_deleted_above,
1481 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
1482 if (*mp)
1483 mp = &(*mp)->m_next;
1484 }
1485 /* ip_srcroute doesn't do what we want here, need to fix */
1486 if (inp->inp_flags & INP_RECVRETOPTS) {
1487 *mp = sbcreatecontrol((caddr_t) ip_srcroute(m),
1488 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
1489 if (*mp)
1490 mp = &(*mp)->m_next;
1491 }
1492#endif
1493 if (inp->inp_flags & INP_RECVIF) {
1494 struct ifnet *ifp;
1495 struct sdlbuf {
1496 struct sockaddr_dl sdl;
1497 u_char pad[32];
1498 } sdlbuf;
1499 struct sockaddr_dl *sdp;
1500 struct sockaddr_dl *sdl2 = &sdlbuf.sdl;
1501
1502		if ((ifp = m->m_pkthdr.rcvif) != NULL &&
1503		    ifp->if_index && ifp->if_index <= if_index) {
1504 sdp = (struct sockaddr_dl *)ifp->if_addr->ifa_addr;
1505 /*
1506			 * Change our mind and don't try to copy.
1507 */
1508 if ((sdp->sdl_family != AF_LINK)
1509 || (sdp->sdl_len > sizeof(sdlbuf))) {
1510 goto makedummy;
1511 }
1512 bcopy(sdp, sdl2, sdp->sdl_len);
1513 } else {
1514makedummy:
1515 sdl2->sdl_len
1516 = offsetof(struct sockaddr_dl, sdl_data[0]);
1517 sdl2->sdl_family = AF_LINK;
1518 sdl2->sdl_index = 0;
1519 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
1520 }
1521 *mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
1522 IP_RECVIF, IPPROTO_IP);
1523 if (*mp)
1524 mp = &(*mp)->m_next;
1525 }
1526}
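/*
 * Userland sketch, not part of this file: requesting the control data
 * ip_savecontrol() generates and walking the resulting cmsgs on a
 * bound UDP socket.  Error handling is abbreviated.
 */
#if 0
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

static void
recv_with_dstaddr(int s)
{
	char buf[1500], cbuf[256];
	struct iovec iov;
	struct msghdr msg;
	struct cmsghdr *cm;
	struct in_addr dst;
	int on = 1;

	setsockopt(s, IPPROTO_IP, IP_RECVDSTADDR, &on, sizeof(on));
	setsockopt(s, IPPROTO_IP, IP_RECVTTL, &on, sizeof(on));

	iov.iov_base = buf;
	iov.iov_len = sizeof(buf);
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(s, &msg, 0) == -1)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
	    cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP &&
		    cm->cmsg_type == IP_RECVDSTADDR)
			memcpy(&dst, CMSG_DATA(cm), sizeof(dst));
	}
}
#endif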
1527
1528/*
1529 * XXX these routines are called from the upper part of the kernel.
1530 * They need to be locked when we remove Giant.
1531 *
1532 * They could also be moved to ip_mroute.c, since all the RSVP
1533 * handling is done there already.
1534 */
1535static int ip_rsvp_on;
1536struct socket *ip_rsvpd;
1537int
1538ip_rsvp_init(struct socket *so)
1539{
1540 if (so->so_type != SOCK_RAW ||
1541 so->so_proto->pr_protocol != IPPROTO_RSVP)
1542 return EOPNOTSUPP;
1543
1544 if (ip_rsvpd != NULL)
1545 return EADDRINUSE;
1546
1547 ip_rsvpd = so;
1548 /*
1549 * This may seem silly, but we need to be sure we don't over-increment
1550 * the RSVP counter, in case something slips up.
1551 */
1552 if (!ip_rsvp_on) {
1553 ip_rsvp_on = 1;
1554 rsvp_on++;
1555 }
1556
1557 return 0;
1558}
1559
1560int
1561ip_rsvp_done(void)
1562{
1563 ip_rsvpd = NULL;
1564 /*
1565 * This may seem silly, but we need to be sure we don't over-decrement
1566 * the RSVP counter, in case something slips up.
1567 */
1568 if (ip_rsvp_on) {
1569 ip_rsvp_on = 0;
1570 rsvp_on--;
1571 }
1572 return 0;
1573}
1574
1575void
1576rsvp_input(struct mbuf *m, int off) /* XXX must fixup manually */
1577{
1578 if (rsvp_input_p) { /* call the real one if loaded */
1579 rsvp_input_p(m, off);
1580 return;
1581 }
1582
1583	/* We can still get packets with rsvp_on = 0 if there is a local
1584	 * member of the group to which the RSVP packet is addressed.
1585	 * But in this case we want to throw the packet away.
1586 */
1587
1588 if (!rsvp_on) {
1589 m_freem(m);
1590 return;
1591 }
1592
1593 if (ip_rsvpd != NULL) {
1594 rip_input(m, off);
1595 return;
1596 }
1597 /* Drop the packet */
1598 m_freem(m);
1599}