udp_usrreq.c: revision 205251 (deleted) → revision 207369 (added)
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California.
4 * Copyright (c) 2008 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 4. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/netinet/udp_usrreq.c 205251 2010-03-17 18:28:27Z bz $");
35__FBSDID("$FreeBSD: head/sys/netinet/udp_usrreq.c 207369 2010-04-29 11:52:42Z bz $");
36
37#include "opt_ipfw.h"
38#include "opt_inet6.h"
39#include "opt_ipsec.h"
40
41#include <sys/param.h>
42#include <sys/domain.h>
43#include <sys/eventhandler.h>
44#include <sys/jail.h>
45#include <sys/kernel.h>
46#include <sys/lock.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/priv.h>
50#include <sys/proc.h>
51#include <sys/protosw.h>
52#include <sys/signalvar.h>
53#include <sys/socket.h>
54#include <sys/socketvar.h>
55#include <sys/sx.h>
56#include <sys/sysctl.h>
57#include <sys/syslog.h>
58#include <sys/systm.h>
59
60#include <vm/uma.h>
61
62#include <net/if.h>
63#include <net/route.h>
64
65#include <netinet/in.h>
66#include <netinet/in_pcb.h>
67#include <netinet/in_systm.h>
68#include <netinet/in_var.h>
69#include <netinet/ip.h>
70#ifdef INET6
71#include <netinet/ip6.h>
72#endif
73#include <netinet/ip_icmp.h>
74#include <netinet/icmp_var.h>
75#include <netinet/ip_var.h>
76#include <netinet/ip_options.h>
77#ifdef INET6
78#include <netinet6/ip6_var.h>
79#endif
80#include <netinet/udp.h>
81#include <netinet/udp_var.h>
82
83#ifdef IPSEC
84#include <netipsec/ipsec.h>
85#include <netipsec/esp.h>
86#endif
87
88#include <machine/in_cksum.h>
89
90#include <security/mac/mac_framework.h>
91
92/*
93 * UDP protocol implementation.
94 * Per RFC 768, August, 1980.
95 */
96
97VNET_DEFINE(int, udp_blackhole);
98
99/*
100 * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums
 101 * removes the only data integrity mechanism for packets; malformed
 102 * packets that would otherwise be discarded due to bad checksums may
 103 * then cause problems (especially for NFS data blocks).
104 */
105static int udp_cksum = 1;
106SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW, &udp_cksum,
107 0, "compute udp checksum");
108
109int udp_log_in_vain = 0;
110SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
111 &udp_log_in_vain, 0, "Log all incoming UDP packets");
112
97/*
98 * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums
 99 * removes the only data integrity mechanism for packets; malformed
 100 * packets that would otherwise be discarded due to bad checksums may
 101 * then cause problems (especially for NFS data blocks).
102 */
103static int udp_cksum = 1;
104SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW, &udp_cksum,
105 0, "compute udp checksum");
106
107int udp_log_in_vain = 0;
108SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
109 &udp_log_in_vain, 0, "Log all incoming UDP packets");
110
111VNET_DEFINE(int, udp_blackhole) = 0;
113SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
114 &VNET_NAME(udp_blackhole), 0,
115 "Do not send port unreachables for refused connects");
116
117u_long udp_sendspace = 9216; /* really max datagram size */
118 /* 40 1K datagrams */
119SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
120 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
121
122u_long udp_recvspace = 40 * (1024 +
123#ifdef INET6
124 sizeof(struct sockaddr_in6)
125#else
126 sizeof(struct sockaddr_in)
127#endif
128 );
129
130SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
131 &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
132
133VNET_DEFINE(struct inpcbhead, udb); /* from udp_var.h */
134VNET_DEFINE(struct inpcbinfo, udbinfo);
135static VNET_DEFINE(uma_zone_t, udpcb_zone);
112SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
113 &VNET_NAME(udp_blackhole), 0,
114 "Do not send port unreachables for refused connects");
115
116u_long udp_sendspace = 9216; /* really max datagram size */
117 /* 40 1K datagrams */
118SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
119 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
120
121u_long udp_recvspace = 40 * (1024 +
122#ifdef INET6
123 sizeof(struct sockaddr_in6)
124#else
125 sizeof(struct sockaddr_in)
126#endif
127 );
128
129SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
130 &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
131
132VNET_DEFINE(struct inpcbhead, udb); /* from udp_var.h */
133VNET_DEFINE(struct inpcbinfo, udbinfo);
134static VNET_DEFINE(uma_zone_t, udpcb_zone);
136VNET_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
137
138#define V_udpcb_zone VNET(udpcb_zone)
139
140#ifndef UDBHASHSIZE
141#define UDBHASHSIZE 128
142#endif
143
135#define V_udpcb_zone VNET(udpcb_zone)
136
137#ifndef UDBHASHSIZE
138#define UDBHASHSIZE 128
139#endif
140
141VNET_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
144SYSCTL_VNET_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RW,
145 &VNET_NAME(udpstat), udpstat,
146 "UDP statistics (struct udpstat, netinet/udp_var.h)");
147
148static void udp_detach(struct socket *so);
149static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
150 struct mbuf *, struct thread *);
151#ifdef IPSEC
152#ifdef IPSEC_NAT_T
153#define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
154#ifdef INET
155static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
156#endif
157#endif /* IPSEC_NAT_T */
158#endif /* IPSEC */
159
160static void
161udp_zone_change(void *tag)
162{
163
164 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
165 uma_zone_set_max(V_udpcb_zone, maxsockets);
166}
167
168static int
169udp_inpcb_init(void *mem, int size, int flags)
170{
171 struct inpcb *inp;
172
173 inp = mem;
174 INP_LOCK_INIT(inp, "inp", "udpinp");
175 return (0);
176}
177
178void
179udp_init(void)
180{
181
142SYSCTL_VNET_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RW,
143 &VNET_NAME(udpstat), udpstat,
144 "UDP statistics (struct udpstat, netinet/udp_var.h)");
145
146static void udp_detach(struct socket *so);
147static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
148 struct mbuf *, struct thread *);
149#ifdef IPSEC
150#ifdef IPSEC_NAT_T
151#define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
152#ifdef INET
153static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
154#endif
155#endif /* IPSEC_NAT_T */
156#endif /* IPSEC */
157
158static void
159udp_zone_change(void *tag)
160{
161
162 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
163 uma_zone_set_max(V_udpcb_zone, maxsockets);
164}
165
166static int
167udp_inpcb_init(void *mem, int size, int flags)
168{
169 struct inpcb *inp;
170
171 inp = mem;
172 INP_LOCK_INIT(inp, "inp", "udpinp");
173 return (0);
174}
175
176void
177udp_init(void)
178{
179
182 V_udp_blackhole = 0;
183 in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
184 "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE);
185 V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
186 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
187 uma_zone_set_max(V_udpcb_zone, maxsockets);
188 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
189 EVENTHANDLER_PRI_ANY);
190}
191
192/*
193 * Kernel module interface for updating udpstat. The argument is an index
194 * into udpstat treated as an array of u_long. While this encodes the
195 * general layout of udpstat into the caller, it doesn't encode its location,
196 * so that future changes to add, for example, per-CPU stats support won't
197 * cause binary compatibility problems for kernel modules.
198 */
199void
200kmod_udpstat_inc(int statnum)
201{
202
203 (*((u_long *)&V_udpstat + statnum))++;
204}
205
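/*
 * Allocate a udpcb (the UDP-specific control block hung off inp_ppcb) for
 * a newly created inpcb; udp_discardcb() below releases it again.
 */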
206int
207udp_newudpcb(struct inpcb *inp)
208{
209 struct udpcb *up;
210
211 up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
212 if (up == NULL)
213 return (ENOBUFS);
214 inp->inp_ppcb = up;
215 return (0);
216}
217
218void
219udp_discardcb(struct udpcb *up)
220{
221
222 uma_zfree(V_udpcb_zone, up);
223}
224
225#ifdef VIMAGE
226void
227udp_destroy(void)
228{
229
230 in_pcbinfo_destroy(&V_udbinfo);
231 uma_zdestroy(V_udpcb_zone);
232}
233#endif
234
235/*
236 * Subroutine of udp_input(), which appends the provided mbuf chain to the
237 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
238 * contains the source address. If the socket ends up being an IPv6 socket,
239 * udp_append() will convert to a sockaddr_in6 before passing the address
240 * into the socket code.
241 */
242static void
243udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
244 struct sockaddr_in *udp_in)
245{
246 struct sockaddr *append_sa;
247 struct socket *so;
248 struct mbuf *opts = 0;
249#ifdef INET6
250 struct sockaddr_in6 udp_in6;
251#endif
252#ifdef IPSEC
253#ifdef IPSEC_NAT_T
254#ifdef INET
255 struct udpcb *up;
256#endif
257#endif
258#endif
259
260 INP_RLOCK_ASSERT(inp);
261
262#ifdef IPSEC
263 /* Check AH/ESP integrity. */
264 if (ipsec4_in_reject(n, inp)) {
265 m_freem(n);
266 V_ipsec4stat.in_polvio++;
267 return;
268 }
269#ifdef IPSEC_NAT_T
270#ifdef INET
271 up = intoudpcb(inp);
272 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
273 if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
274 n = udp4_espdecap(inp, n, off);
275 if (n == NULL) /* Consumed. */
276 return;
277 }
278#endif /* INET */
279#endif /* IPSEC_NAT_T */
280#endif /* IPSEC */
281#ifdef MAC
282 if (mac_inpcb_check_deliver(inp, n) != 0) {
283 m_freem(n);
284 return;
285 }
286#endif
287 if (inp->inp_flags & INP_CONTROLOPTS ||
288 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
289#ifdef INET6
290 if (inp->inp_vflag & INP_IPV6)
291 (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
292 else
293#endif
294 ip_savecontrol(inp, &opts, ip, n);
295 }
296#ifdef INET6
297 if (inp->inp_vflag & INP_IPV6) {
298 bzero(&udp_in6, sizeof(udp_in6));
299 udp_in6.sin6_len = sizeof(udp_in6);
300 udp_in6.sin6_family = AF_INET6;
301 in6_sin_2_v4mapsin6(udp_in, &udp_in6);
302 append_sa = (struct sockaddr *)&udp_in6;
303 } else
304#endif
305 append_sa = (struct sockaddr *)udp_in;
306 m_adj(n, off);
307
308 so = inp->inp_socket;
309 SOCKBUF_LOCK(&so->so_rcv);
310 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
311 SOCKBUF_UNLOCK(&so->so_rcv);
312 m_freem(n);
313 if (opts)
314 m_freem(opts);
315 UDPSTAT_INC(udps_fullsock);
316 } else
317 sorwakeup_locked(so);
318}
319
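/*
 * udp_input() is invoked by the IP layer for each received UDP datagram:
 * it validates the UDP header and checksum, delivers broadcast/multicast
 * datagrams to every matching socket, and otherwise looks up the single
 * matching inpcb and hands the payload to udp_append() or to a registered
 * tunneling callback.
 */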
320void
321udp_input(struct mbuf *m, int off)
322{
323 int iphlen = off;
324 struct ip *ip;
325 struct udphdr *uh;
326 struct ifnet *ifp;
327 struct inpcb *inp;
328 struct udpcb *up;
329 int len;
330 struct ip save_ip;
331 struct sockaddr_in udp_in;
332#ifdef IPFIREWALL_FORWARD
333 struct m_tag *fwd_tag;
334#endif
335
336 ifp = m->m_pkthdr.rcvif;
337 UDPSTAT_INC(udps_ipackets);
338
339 /*
340 * Strip IP options, if any; should skip this, make available to
341 * user, and use on returned packets, but we don't yet have a way to
342 * check the checksum with options still present.
343 */
344 if (iphlen > sizeof (struct ip)) {
345 ip_stripoptions(m, (struct mbuf *)0);
346 iphlen = sizeof(struct ip);
347 }
348
349 /*
350 * Get IP and UDP header together in first mbuf.
351 */
352 ip = mtod(m, struct ip *);
353 if (m->m_len < iphlen + sizeof(struct udphdr)) {
354 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
355 UDPSTAT_INC(udps_hdrops);
356 return;
357 }
358 ip = mtod(m, struct ip *);
359 }
360 uh = (struct udphdr *)((caddr_t)ip + iphlen);
361
362 /*
363 * Destination port of 0 is illegal, based on RFC768.
364 */
365 if (uh->uh_dport == 0)
366 goto badunlocked;
367
368 /*
369 * Construct sockaddr format source address. Stuff source address
370 * and datagram in user buffer.
371 */
372 bzero(&udp_in, sizeof(udp_in));
373 udp_in.sin_len = sizeof(udp_in);
374 udp_in.sin_family = AF_INET;
375 udp_in.sin_port = uh->uh_sport;
376 udp_in.sin_addr = ip->ip_src;
377
378 /*
379 * Make mbuf data length reflect UDP length. If not enough data to
380 * reflect UDP length, drop.
381 */
382 len = ntohs((u_short)uh->uh_ulen);
383 if (ip->ip_len != len) {
384 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
385 UDPSTAT_INC(udps_badlen);
386 goto badunlocked;
387 }
388 m_adj(m, len - ip->ip_len);
389 /* ip->ip_len = len; */
390 }
391
392 /*
 393 * Save a copy of the IP header in case we want to restore it for
394 * sending an ICMP error message in response.
395 */
396 if (!V_udp_blackhole)
397 save_ip = *ip;
398 else
399 memset(&save_ip, 0, sizeof(save_ip));
400
401 /*
402 * Checksum extended UDP header and data.
403 */
404 if (uh->uh_sum) {
405 u_short uh_sum;
406
407 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
408 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
409 uh_sum = m->m_pkthdr.csum_data;
410 else
411 uh_sum = in_pseudo(ip->ip_src.s_addr,
412 ip->ip_dst.s_addr, htonl((u_short)len +
413 m->m_pkthdr.csum_data + IPPROTO_UDP));
414 uh_sum ^= 0xffff;
415 } else {
416 char b[9];
417
418 bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
419 bzero(((struct ipovly *)ip)->ih_x1, 9);
420 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
421 uh_sum = in_cksum(m, len + sizeof (struct ip));
422 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
423 }
424 if (uh_sum) {
425 UDPSTAT_INC(udps_badsum);
426 m_freem(m);
427 return;
428 }
429 } else
430 UDPSTAT_INC(udps_nosum);
431
432#ifdef IPFIREWALL_FORWARD
433 /*
434 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
435 */
436 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
437 if (fwd_tag != NULL) {
438 struct sockaddr_in *next_hop;
439
440 /*
441 * Do the hack.
442 */
443 next_hop = (struct sockaddr_in *)(fwd_tag + 1);
444 ip->ip_dst = next_hop->sin_addr;
445 uh->uh_dport = ntohs(next_hop->sin_port);
446
447 /*
448 * Remove the tag from the packet. We don't need it anymore.
449 */
450 m_tag_delete(m, fwd_tag);
451 }
452#endif
453
454 INP_INFO_RLOCK(&V_udbinfo);
455 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
456 in_broadcast(ip->ip_dst, ifp)) {
457 struct inpcb *last;
458 struct ip_moptions *imo;
459
460 last = NULL;
461 LIST_FOREACH(inp, &V_udb, inp_list) {
462 if (inp->inp_lport != uh->uh_dport)
463 continue;
464#ifdef INET6
465 if ((inp->inp_vflag & INP_IPV4) == 0)
466 continue;
467#endif
468 if (inp->inp_laddr.s_addr != INADDR_ANY &&
469 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
470 continue;
471 if (inp->inp_faddr.s_addr != INADDR_ANY &&
472 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
473 continue;
474 if (inp->inp_fport != 0 &&
475 inp->inp_fport != uh->uh_sport)
476 continue;
477
478 INP_RLOCK(inp);
479
480 /*
481 * Handle socket delivery policy for any-source
482 * and source-specific multicast. [RFC3678]
483 */
484 imo = inp->inp_moptions;
485 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
486 imo != NULL) {
487 struct sockaddr_in group;
488 int blocked;
489
490 bzero(&group, sizeof(struct sockaddr_in));
491 group.sin_len = sizeof(struct sockaddr_in);
492 group.sin_family = AF_INET;
493 group.sin_addr = ip->ip_dst;
494
495 blocked = imo_multi_filter(imo, ifp,
496 (struct sockaddr *)&group,
497 (struct sockaddr *)&udp_in);
498 if (blocked != MCAST_PASS) {
499 if (blocked == MCAST_NOTGMEMBER)
500 IPSTAT_INC(ips_notmember);
501 if (blocked == MCAST_NOTSMEMBER ||
502 blocked == MCAST_MUTED)
503 UDPSTAT_INC(udps_filtermcast);
504 INP_RUNLOCK(inp);
505 continue;
506 }
507 }
508 if (last != NULL) {
509 struct mbuf *n;
510
511 n = m_copy(m, 0, M_COPYALL);
512 up = intoudpcb(last);
513 if (up->u_tun_func == NULL) {
514 if (n != NULL)
515 udp_append(last,
516 ip, n,
517 iphlen +
518 sizeof(struct udphdr),
519 &udp_in);
520 } else {
521 /*
 522 * Engage the tunneling protocol; we
 523 * will have to leave the info_lock
 524 * up, since we are hunting through
 525 * multiple UDPs.
526 */
527
528 (*up->u_tun_func)(n, iphlen, last);
529 }
530 INP_RUNLOCK(last);
531 }
532 last = inp;
533 /*
534 * Don't look for additional matches if this one does
535 * not have either the SO_REUSEPORT or SO_REUSEADDR
536 * socket options set. This heuristic avoids
537 * searching through all pcbs in the common case of a
538 * non-shared port. It assumes that an application
539 * will never clear these options after setting them.
540 */
541 if ((last->inp_socket->so_options &
542 (SO_REUSEPORT|SO_REUSEADDR)) == 0)
543 break;
544 }
545
546 if (last == NULL) {
547 /*
548 * No matching pcb found; discard datagram. (No need
549 * to send an ICMP Port Unreachable for a broadcast
 550 * or multicast datagram.)
551 */
552 UDPSTAT_INC(udps_noportbcast);
553 goto badheadlocked;
554 }
555 up = intoudpcb(last);
556 if (up->u_tun_func == NULL) {
557 udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
558 &udp_in);
559 } else {
560 /*
561 * Engage the tunneling protocol.
562 */
563 (*up->u_tun_func)(m, iphlen, last);
564 }
565 INP_RUNLOCK(last);
566 INP_INFO_RUNLOCK(&V_udbinfo);
567 return;
568 }
569
570 /*
571 * Locate pcb for datagram.
572 */
573 inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport,
574 ip->ip_dst, uh->uh_dport, 1, ifp);
575 if (inp == NULL) {
576 if (udp_log_in_vain) {
577 char buf[4*sizeof "123"];
578
579 strcpy(buf, inet_ntoa(ip->ip_dst));
580 log(LOG_INFO,
581 "Connection attempt to UDP %s:%d from %s:%d\n",
582 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
583 ntohs(uh->uh_sport));
584 }
585 UDPSTAT_INC(udps_noport);
586 if (m->m_flags & (M_BCAST | M_MCAST)) {
587 UDPSTAT_INC(udps_noportbcast);
588 goto badheadlocked;
589 }
590 if (V_udp_blackhole)
591 goto badheadlocked;
592 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
593 goto badheadlocked;
594 *ip = save_ip;
595 ip->ip_len += iphlen;
596 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
597 INP_INFO_RUNLOCK(&V_udbinfo);
598 return;
599 }
600
601 /*
602 * Check the minimum TTL for socket.
603 */
604 INP_RLOCK(inp);
605 INP_INFO_RUNLOCK(&V_udbinfo);
606 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
607 INP_RUNLOCK(inp);
608 goto badunlocked;
609 }
610 up = intoudpcb(inp);
611 if (up->u_tun_func == NULL) {
612 udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in);
613 } else {
614 /*
615 * Engage the tunneling protocol.
616 */
617
618 (*up->u_tun_func)(m, iphlen, inp);
619 }
620 INP_RUNLOCK(inp);
621 return;
622
623badheadlocked:
624 if (inp)
625 INP_RUNLOCK(inp);
626 INP_INFO_RUNLOCK(&V_udbinfo);
627badunlocked:
628 m_freem(m);
629}
630
631/*
632 * Notify a udp user of an asynchronous error; just wake up so that they can
633 * collect error status.
634 */
635struct inpcb *
636udp_notify(struct inpcb *inp, int errno)
637{
638
639 /*
640 * While udp_ctlinput() always calls udp_notify() with a read lock
641 * when invoking it directly, in_pcbnotifyall() currently uses write
642 * locks due to sharing code with TCP. For now, accept either a read
643 * or a write lock, but a read lock is sufficient.
644 */
645 INP_LOCK_ASSERT(inp);
646
647 inp->inp_socket->so_error = errno;
648 sorwakeup(inp->inp_socket);
649 sowwakeup(inp->inp_socket);
650 return (inp);
651}
652
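/*
 * ICMP error handler: map the ICMP command to an errno and wake up the
 * affected socket(s) via udp_notify().  Redirects are ignored; commands
 * arriving without an IP header (e.g. PRC_HOSTDEAD) notify every socket
 * connected to the faulting address via in_pcbnotifyall().
 */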
653void
654udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
655{
656 struct ip *ip = vip;
657 struct udphdr *uh;
658 struct in_addr faddr;
659 struct inpcb *inp;
660
661 faddr = ((struct sockaddr_in *)sa)->sin_addr;
662 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
663 return;
664
665 /*
666 * Redirects don't need to be handled up here.
667 */
668 if (PRC_IS_REDIRECT(cmd))
669 return;
670
671 /*
672 * Hostdead is ugly because it goes linearly through all PCBs.
673 *
674 * XXX: We never get this from ICMP, otherwise it makes an excellent
675 * DoS attack on machines with many connections.
676 */
677 if (cmd == PRC_HOSTDEAD)
678 ip = NULL;
679 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
680 return;
681 if (ip != NULL) {
682 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
683 INP_INFO_RLOCK(&V_udbinfo);
684 inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport,
685 ip->ip_src, uh->uh_sport, 0, NULL);
686 if (inp != NULL) {
687 INP_RLOCK(inp);
688 if (inp->inp_socket != NULL) {
689 udp_notify(inp, inetctlerrmap[cmd]);
690 }
691 INP_RUNLOCK(inp);
692 }
693 INP_INFO_RUNLOCK(&V_udbinfo);
694 } else
695 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
696 udp_notify);
697}
698
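/*
 * Sysctl handler backing net.inet.udp.pcblist: export the list of active
 * UDP inpcbs as an array of struct xinpcb, bracketed by struct xinpgen
 * records so the caller can detect concurrent changes.
 */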
699static int
700udp_pcblist(SYSCTL_HANDLER_ARGS)
701{
702 int error, i, n;
703 struct inpcb *inp, **inp_list;
704 inp_gen_t gencnt;
705 struct xinpgen xig;
706
707 /*
708 * The process of preparing the PCB list is too time-consuming and
709 * resource-intensive to repeat twice on every request.
710 */
711 if (req->oldptr == 0) {
712 n = V_udbinfo.ipi_count;
713 req->oldidx = 2 * (sizeof xig)
714 + (n + n/8) * sizeof(struct xinpcb);
715 return (0);
716 }
717
718 if (req->newptr != 0)
719 return (EPERM);
720
721 /*
722 * OK, now we're committed to doing something.
723 */
724 INP_INFO_RLOCK(&V_udbinfo);
725 gencnt = V_udbinfo.ipi_gencnt;
726 n = V_udbinfo.ipi_count;
727 INP_INFO_RUNLOCK(&V_udbinfo);
728
729 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
730 + n * sizeof(struct xinpcb));
731 if (error != 0)
732 return (error);
733
734 xig.xig_len = sizeof xig;
735 xig.xig_count = n;
736 xig.xig_gen = gencnt;
737 xig.xig_sogen = so_gencnt;
738 error = SYSCTL_OUT(req, &xig, sizeof xig);
739 if (error)
740 return (error);
741
742 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
743 if (inp_list == 0)
744 return (ENOMEM);
745
746 INP_INFO_RLOCK(&V_udbinfo);
747 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
748 inp = LIST_NEXT(inp, inp_list)) {
749 INP_WLOCK(inp);
750 if (inp->inp_gencnt <= gencnt &&
751 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
752 in_pcbref(inp);
753 inp_list[i++] = inp;
754 }
755 INP_WUNLOCK(inp);
756 }
757 INP_INFO_RUNLOCK(&V_udbinfo);
758 n = i;
759
760 error = 0;
761 for (i = 0; i < n; i++) {
762 inp = inp_list[i];
763 INP_RLOCK(inp);
764 if (inp->inp_gencnt <= gencnt) {
765 struct xinpcb xi;
766
767 bzero(&xi, sizeof(xi));
768 xi.xi_len = sizeof xi;
769 /* XXX should avoid extra copy */
770 bcopy(inp, &xi.xi_inp, sizeof *inp);
771 if (inp->inp_socket)
772 sotoxsocket(inp->inp_socket, &xi.xi_socket);
773 xi.xi_inp.inp_gencnt = inp->inp_gencnt;
774 INP_RUNLOCK(inp);
775 error = SYSCTL_OUT(req, &xi, sizeof xi);
776 } else
777 INP_RUNLOCK(inp);
778 }
779 INP_INFO_WLOCK(&V_udbinfo);
780 for (i = 0; i < n; i++) {
781 inp = inp_list[i];
782 INP_WLOCK(inp);
783 if (!in_pcbrele(inp))
784 INP_WUNLOCK(inp);
785 }
786 INP_INFO_WUNLOCK(&V_udbinfo);
787
788 if (!error) {
789 /*
790 * Give the user an updated idea of our state. If the
791 * generation differs from what we told her before, she knows
792 * that something happened while we were processing this
793 * request, and it might be necessary to retry.
794 */
795 INP_INFO_RLOCK(&V_udbinfo);
796 xig.xig_gen = V_udbinfo.ipi_gencnt;
797 xig.xig_sogen = so_gencnt;
798 xig.xig_count = V_udbinfo.ipi_count;
799 INP_INFO_RUNLOCK(&V_udbinfo);
800 error = SYSCTL_OUT(req, &xig, sizeof xig);
801 }
802 free(inp_list, M_TEMP);
803 return (error);
804}
805
806SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
807 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
808
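/*
 * Sysctl handler backing net.inet.udp.getcred: look up the UDP socket
 * matching the supplied address/port pairs and return the credentials of
 * its owner (requires PRIV_NETINET_GETCRED).
 */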
809static int
810udp_getcred(SYSCTL_HANDLER_ARGS)
811{
812 struct xucred xuc;
813 struct sockaddr_in addrs[2];
814 struct inpcb *inp;
815 int error;
816
817 error = priv_check(req->td, PRIV_NETINET_GETCRED);
818 if (error)
819 return (error);
820 error = SYSCTL_IN(req, addrs, sizeof(addrs));
821 if (error)
822 return (error);
823 INP_INFO_RLOCK(&V_udbinfo);
824 inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
825 addrs[0].sin_addr, addrs[0].sin_port, 1, NULL);
826 if (inp != NULL) {
827 INP_RLOCK(inp);
828 INP_INFO_RUNLOCK(&V_udbinfo);
829 if (inp->inp_socket == NULL)
830 error = ENOENT;
831 if (error == 0)
832 error = cr_canseeinpcb(req->td->td_ucred, inp);
833 if (error == 0)
834 cru2x(inp->inp_cred, &xuc);
835 INP_RUNLOCK(inp);
836 } else {
837 INP_INFO_RUNLOCK(&V_udbinfo);
838 error = ENOENT;
839 }
840 if (error == 0)
841 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
842 return (error);
843}
844
845SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
846 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
847 udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
848
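/*
 * Handle IPPROTO_UDP-level socket options; options at any other level are
 * passed through to ip_ctloutput()/ip6_ctloutput().  Only UDP_ENCAP
 * (ESP-in-UDP NAT traversal) is handled at this level.
 */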
849int
850udp_ctloutput(struct socket *so, struct sockopt *sopt)
851{
852 int error = 0, optval;
853 struct inpcb *inp;
854#ifdef IPSEC_NAT_T
855 struct udpcb *up;
856#endif
857
858 inp = sotoinpcb(so);
859 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
860 INP_WLOCK(inp);
861 if (sopt->sopt_level != IPPROTO_UDP) {
862#ifdef INET6
863 if (INP_CHECK_SOCKAF(so, AF_INET6)) {
864 INP_WUNLOCK(inp);
865 error = ip6_ctloutput(so, sopt);
866 } else {
867#endif
868 INP_WUNLOCK(inp);
869 error = ip_ctloutput(so, sopt);
870#ifdef INET6
871 }
872#endif
873 return (error);
874 }
875
876 switch (sopt->sopt_dir) {
877 case SOPT_SET:
878 switch (sopt->sopt_name) {
879 case UDP_ENCAP:
880 INP_WUNLOCK(inp);
881 error = sooptcopyin(sopt, &optval, sizeof optval,
882 sizeof optval);
883 if (error)
884 break;
885 inp = sotoinpcb(so);
886 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
887 INP_WLOCK(inp);
888#ifdef IPSEC_NAT_T
889 up = intoudpcb(inp);
890 KASSERT(up != NULL, ("%s: up == NULL", __func__));
891#endif
892 switch (optval) {
893 case 0:
894 /* Clear all UDP encap. */
895#ifdef IPSEC_NAT_T
896 up->u_flags &= ~UF_ESPINUDP_ALL;
897#endif
898 break;
899#ifdef IPSEC_NAT_T
900 case UDP_ENCAP_ESPINUDP:
901 case UDP_ENCAP_ESPINUDP_NON_IKE:
902 up->u_flags &= ~UF_ESPINUDP_ALL;
903 if (optval == UDP_ENCAP_ESPINUDP)
904 up->u_flags |= UF_ESPINUDP;
905 else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
906 up->u_flags |= UF_ESPINUDP_NON_IKE;
907 break;
908#endif
909 default:
910 error = EINVAL;
911 break;
912 }
913 INP_WUNLOCK(inp);
914 break;
915 default:
916 INP_WUNLOCK(inp);
917 error = ENOPROTOOPT;
918 break;
919 }
920 break;
921 case SOPT_GET:
922 switch (sopt->sopt_name) {
923#ifdef IPSEC_NAT_T
924 case UDP_ENCAP:
925 up = intoudpcb(inp);
926 KASSERT(up != NULL, ("%s: up == NULL", __func__));
927 optval = up->u_flags & UF_ESPINUDP_ALL;
928 INP_WUNLOCK(inp);
929 error = sooptcopyout(sopt, &optval, sizeof optval);
930 break;
931#endif
932 default:
933 INP_WUNLOCK(inp);
934 error = ENOPROTOOPT;
935 break;
936 }
937 break;
938 }
939 return (error);
940}
941
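/*
 * Common send path for UDP: parse any IP_SENDSRCADDR control data, choose
 * the lock strength needed to (possibly) bind or connect the inpcb,
 * prepend the UDP/IP header, fill in the checksum, and hand the packet to
 * ip_output().
 */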
942static int
943udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
944 struct mbuf *control, struct thread *td)
945{
946 struct udpiphdr *ui;
947 int len = m->m_pkthdr.len;
948 struct in_addr faddr, laddr;
949 struct cmsghdr *cm;
950 struct sockaddr_in *sin, src;
951 int error = 0;
952 int ipflags;
953 u_short fport, lport;
954 int unlock_udbinfo;
955
956 /*
957 * udp_output() may need to temporarily bind or connect the current
958 * inpcb. As such, we don't know up front whether we will need the
959 * pcbinfo lock or not. Do any work to decide what is needed up
960 * front before acquiring any locks.
961 */
962 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
963 if (control)
964 m_freem(control);
965 m_freem(m);
966 return (EMSGSIZE);
967 }
968
969 src.sin_family = 0;
970 if (control != NULL) {
971 /*
972 * XXX: Currently, we assume all the optional information is
973 * stored in a single mbuf.
974 */
975 if (control->m_next) {
976 m_freem(control);
977 m_freem(m);
978 return (EINVAL);
979 }
980 for (; control->m_len > 0;
981 control->m_data += CMSG_ALIGN(cm->cmsg_len),
982 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
983 cm = mtod(control, struct cmsghdr *);
984 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
985 || cm->cmsg_len > control->m_len) {
986 error = EINVAL;
987 break;
988 }
989 if (cm->cmsg_level != IPPROTO_IP)
990 continue;
991
992 switch (cm->cmsg_type) {
993 case IP_SENDSRCADDR:
994 if (cm->cmsg_len !=
995 CMSG_LEN(sizeof(struct in_addr))) {
996 error = EINVAL;
997 break;
998 }
999 bzero(&src, sizeof(src));
1000 src.sin_family = AF_INET;
1001 src.sin_len = sizeof(src);
1002 src.sin_port = inp->inp_lport;
1003 src.sin_addr =
1004 *(struct in_addr *)CMSG_DATA(cm);
1005 break;
1006
1007 default:
1008 error = ENOPROTOOPT;
1009 break;
1010 }
1011 if (error)
1012 break;
1013 }
1014 m_freem(control);
1015 }
1016 if (error) {
1017 m_freem(m);
1018 return (error);
1019 }
1020
1021 /*
1022 * Depending on whether or not the application has bound or connected
1023 * the socket, we may have to do varying levels of work. The optimal
1024 * case is for a connected UDP socket, as a global lock isn't
1025 * required at all.
1026 *
1027 * In order to decide which we need, we require stability of the
1028 * inpcb binding, which we ensure by acquiring a read lock on the
1029 * inpcb. This doesn't strictly follow the lock order, so we play
1030 * the trylock and retry game; note that we may end up with more
1031 * conservative locks than required the second time around, so later
1032 * assertions have to accept that. Further analysis of the number of
1033 * misses under contention is required.
1034 */
1035 sin = (struct sockaddr_in *)addr;
1036 INP_RLOCK(inp);
1037 if (sin != NULL &&
1038 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1039 INP_RUNLOCK(inp);
1040 INP_INFO_WLOCK(&V_udbinfo);
1041 INP_WLOCK(inp);
1042 unlock_udbinfo = 2;
1043 } else if ((sin != NULL && (
1044 (sin->sin_addr.s_addr == INADDR_ANY) ||
1045 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1046 (inp->inp_laddr.s_addr == INADDR_ANY) ||
1047 (inp->inp_lport == 0))) ||
1048 (src.sin_family == AF_INET)) {
1049 if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) {
1050 INP_RUNLOCK(inp);
1051 INP_INFO_RLOCK(&V_udbinfo);
1052 INP_RLOCK(inp);
1053 }
1054 unlock_udbinfo = 1;
1055 } else
1056 unlock_udbinfo = 0;
1057
1058 /*
1059 * If the IP_SENDSRCADDR control message was specified, override the
1060 * source address for this datagram. Its use is invalidated if the
1061 * address thus specified is incomplete or clobbers other inpcbs.
1062 */
1063 laddr = inp->inp_laddr;
1064 lport = inp->inp_lport;
1065 if (src.sin_family == AF_INET) {
1066 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1067 if ((lport == 0) ||
1068 (laddr.s_addr == INADDR_ANY &&
1069 src.sin_addr.s_addr == INADDR_ANY)) {
1070 error = EINVAL;
1071 goto release;
1072 }
1073 error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1074 &laddr.s_addr, &lport, td->td_ucred);
1075 if (error)
1076 goto release;
1077 }
1078
1079 /*
1080 * If a UDP socket has been connected, then a local address/port will
1081 * have been selected and bound.
1082 *
1083 * If a UDP socket has not been connected to, then an explicit
1084 * destination address must be used, in which case a local
1085 * address/port may not have been selected and bound.
1086 */
1087 if (sin != NULL) {
1088 INP_LOCK_ASSERT(inp);
1089 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1090 error = EISCONN;
1091 goto release;
1092 }
1093
1094 /*
1095 * Jail may rewrite the destination address, so let it do
1096 * that before we use it.
1097 */
1098 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1099 if (error)
1100 goto release;
1101
1102 /*
1103 * If a local address or port hasn't yet been selected, or if
1104 * the destination address needs to be rewritten due to using
1105 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1106 * to do the heavy lifting. Once a port is selected, we
1107 * commit the binding back to the socket; we also commit the
1108 * binding of the address if in jail.
1109 *
1110 * If we already have a valid binding and we're not
1111 * requesting a destination address rewrite, use a fast path.
1112 */
1113 if (inp->inp_laddr.s_addr == INADDR_ANY ||
1114 inp->inp_lport == 0 ||
1115 sin->sin_addr.s_addr == INADDR_ANY ||
1116 sin->sin_addr.s_addr == INADDR_BROADCAST) {
1117 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1118 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1119 &lport, &faddr.s_addr, &fport, NULL,
1120 td->td_ucred);
1121 if (error)
1122 goto release;
1123
1124 /*
1125 * XXXRW: Why not commit the port if the address is
1126 * !INADDR_ANY?
1127 */
1128 /* Commit the local port if newly assigned. */
1129 if (inp->inp_laddr.s_addr == INADDR_ANY &&
1130 inp->inp_lport == 0) {
1131 INP_INFO_WLOCK_ASSERT(&V_udbinfo);
1132 INP_WLOCK_ASSERT(inp);
1133 /*
1134 * Remember addr if jailed, to prevent
1135 * rebinding.
1136 */
1137 if (prison_flag(td->td_ucred, PR_IP4))
1138 inp->inp_laddr = laddr;
1139 inp->inp_lport = lport;
1140 if (in_pcbinshash(inp) != 0) {
1141 inp->inp_lport = 0;
1142 error = EAGAIN;
1143 goto release;
1144 }
1145 inp->inp_flags |= INP_ANONPORT;
1146 }
1147 } else {
1148 faddr = sin->sin_addr;
1149 fport = sin->sin_port;
1150 }
1151 } else {
1152 INP_LOCK_ASSERT(inp);
1153 faddr = inp->inp_faddr;
1154 fport = inp->inp_fport;
1155 if (faddr.s_addr == INADDR_ANY) {
1156 error = ENOTCONN;
1157 goto release;
1158 }
1159 }
1160
1161 /*
1162 * Calculate data length and get a mbuf for UDP, IP, and possible
 1163 * link-layer headers. Immediately slide the data pointer back forward
1164 * since we won't use that space at this layer.
1165 */
1166 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT);
1167 if (m == NULL) {
1168 error = ENOBUFS;
1169 goto release;
1170 }
1171 m->m_data += max_linkhdr;
1172 m->m_len -= max_linkhdr;
1173 m->m_pkthdr.len -= max_linkhdr;
1174
1175 /*
1176 * Fill in mbuf with extended UDP header and addresses and length put
1177 * into network format.
1178 */
1179 ui = mtod(m, struct udpiphdr *);
1180 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1181 ui->ui_pr = IPPROTO_UDP;
1182 ui->ui_src = laddr;
1183 ui->ui_dst = faddr;
1184 ui->ui_sport = lport;
1185 ui->ui_dport = fport;
1186 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1187
1188 /*
1189 * Set the Don't Fragment bit in the IP header.
1190 */
1191 if (inp->inp_flags & INP_DONTFRAG) {
1192 struct ip *ip;
1193
1194 ip = (struct ip *)&ui->ui_i;
1195 ip->ip_off |= IP_DF;
1196 }
1197
1198 ipflags = 0;
1199 if (inp->inp_socket->so_options & SO_DONTROUTE)
1200 ipflags |= IP_ROUTETOIF;
1201 if (inp->inp_socket->so_options & SO_BROADCAST)
1202 ipflags |= IP_ALLOWBROADCAST;
1203 if (inp->inp_flags & INP_ONESBCAST)
1204 ipflags |= IP_SENDONES;
1205
1206#ifdef MAC
1207 mac_inpcb_create_mbuf(inp, m);
1208#endif
1209
1210 /*
1211 * Set up checksum and output datagram.
1212 */
1213 if (udp_cksum) {
1214 if (inp->inp_flags & INP_ONESBCAST)
1215 faddr.s_addr = INADDR_BROADCAST;
1216 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1217 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1218 m->m_pkthdr.csum_flags = CSUM_UDP;
1219 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1220 } else
1221 ui->ui_sum = 0;
1222 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1223 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1224 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1225 UDPSTAT_INC(udps_opackets);
1226
1227 if (unlock_udbinfo == 2)
1228 INP_INFO_WUNLOCK(&V_udbinfo);
1229 else if (unlock_udbinfo == 1)
1230 INP_INFO_RUNLOCK(&V_udbinfo);
1231 error = ip_output(m, inp->inp_options, NULL, ipflags,
1232 inp->inp_moptions, inp);
1233 if (unlock_udbinfo == 2)
1234 INP_WUNLOCK(inp);
1235 else
1236 INP_RUNLOCK(inp);
1237 return (error);
1238
1239release:
1240 if (unlock_udbinfo == 2) {
1241 INP_WUNLOCK(inp);
1242 INP_INFO_WUNLOCK(&V_udbinfo);
1243 } else if (unlock_udbinfo == 1) {
1244 INP_RUNLOCK(inp);
1245 INP_INFO_RUNLOCK(&V_udbinfo);
1246 } else
1247 INP_RUNLOCK(inp);
1248 m_freem(m);
1249 return (error);
1250}
1251
1252
1253#if defined(IPSEC) && defined(IPSEC_NAT_T)
1254#ifdef INET
1255/*
1256 * Potentially decap ESP in UDP frame. Check for an ESP header
1257 * and optional marker; if present, strip the UDP header and
1258 * push the result through IPSec.
1259 *
1260 * Returns mbuf to be processed (potentially re-allocated) or
1261 * NULL if consumed and/or processed.
1262 */
1263static struct mbuf *
1264udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1265{
1266 size_t minlen, payload, skip, iphlen;
1267 caddr_t data;
1268 struct udpcb *up;
1269 struct m_tag *tag;
1270 struct udphdr *udphdr;
1271 struct ip *ip;
1272
1273 INP_RLOCK_ASSERT(inp);
1274
1275 /*
1276 * Pull up data so the longest case is contiguous:
1277 * IP/UDP hdr + non ESP marker + ESP hdr.
1278 */
1279 minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1280 if (minlen > m->m_pkthdr.len)
1281 minlen = m->m_pkthdr.len;
1282 if ((m = m_pullup(m, minlen)) == NULL) {
1283 V_ipsec4stat.in_inval++;
1284 return (NULL); /* Bypass caller processing. */
1285 }
1286 data = mtod(m, caddr_t); /* Points to ip header. */
1287 payload = m->m_len - off; /* Size of payload. */
1288
1289 if (payload == 1 && data[off] == '\xff')
1290 return (m); /* NB: keepalive packet, no decap. */
1291
1292 up = intoudpcb(inp);
1293 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1294 KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1295 ("u_flags 0x%x", up->u_flags));
1296
1297 /*
1298 * Check that the payload is large enough to hold an
1299 * ESP header and compute the amount of data to remove.
1300 *
1301 * NB: the caller has already done a pullup for us.
1302 * XXX can we assume alignment and eliminate bcopys?
1303 */
1304 if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1305 /*
1306 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1307 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1308 * possible AH mode non-IKE marker+non-ESP marker
1309 * from draft-ietf-ipsec-udp-encaps-00.txt.
1310 */
1311 uint64_t marker;
1312
1313 if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1314 return (m); /* NB: no decap. */
1315 bcopy(data + off, &marker, sizeof(uint64_t));
1316 if (marker != 0) /* Non-IKE marker. */
1317 return (m); /* NB: no decap. */
1318 skip = sizeof(uint64_t) + sizeof(struct udphdr);
1319 } else {
1320 uint32_t spi;
1321
1322 if (payload <= sizeof(struct esp)) {
1323 V_ipsec4stat.in_inval++;
1324 m_freem(m);
1325 return (NULL); /* Discard. */
1326 }
1327 bcopy(data + off, &spi, sizeof(uint32_t));
1328 if (spi == 0) /* Non-ESP marker. */
1329 return (m); /* NB: no decap. */
1330 skip = sizeof(struct udphdr);
1331 }
1332
1333 /*
 1334 * Set up a PACKET_TAG_IPSEC_NAT_T_PORTS tag to remember
 1335 * the UDP ports. This is required if we want to select
 1336 * the right SPD for multiple hosts behind the same NAT.
1337 *
1338 * NB: ports are maintained in network byte order everywhere
1339 * in the NAT-T code.
1340 */
1341 tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1342 2 * sizeof(uint16_t), M_NOWAIT);
1343 if (tag == NULL) {
1344 V_ipsec4stat.in_nomem++;
1345 m_freem(m);
1346 return (NULL); /* Discard. */
1347 }
1348 iphlen = off - sizeof(struct udphdr);
1349 udphdr = (struct udphdr *)(data + iphlen);
1350 ((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1351 ((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1352 m_tag_prepend(m, tag);
1353
1354 /*
 1355 * Remove the UDP header (and possibly the non-ESP marker).
 1356 * The IP header length is iphlen.
1357 * Before:
1358 * <--- off --->
1359 * +----+------+-----+
1360 * | IP | UDP | ESP |
1361 * +----+------+-----+
1362 * <-skip->
1363 * After:
1364 * +----+-----+
1365 * | IP | ESP |
1366 * +----+-----+
1367 * <-skip->
1368 */
1369 ovbcopy(data, data + skip, iphlen);
1370 m_adj(m, skip);
1371
1372 ip = mtod(m, struct ip *);
1373 ip->ip_len -= skip;
1374 ip->ip_p = IPPROTO_ESP;
1375
1376 /*
1377 * We cannot yet update the cksums so clear any
1378 * h/w cksum flags as they are no longer valid.
1379 */
1380 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1381 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1382
1383 (void) ipsec4_common_input(m, iphlen, ip->ip_p);
1384 return (NULL); /* NB: consumed, bypass processing. */
1385}
1386#endif /* INET */
1387#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1388
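/*
 * pru_abort handler: forcibly disconnect the socket, dropping any
 * connected foreign address.
 */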
1389static void
1390udp_abort(struct socket *so)
1391{
1392 struct inpcb *inp;
1393
1394 inp = sotoinpcb(so);
1395 KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1396 INP_INFO_WLOCK(&V_udbinfo);
1397 INP_WLOCK(inp);
1398 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1399 in_pcbdisconnect(inp);
1400 inp->inp_laddr.s_addr = INADDR_ANY;
1401 soisdisconnected(so);
1402 }
1403 INP_WUNLOCK(inp);
1404 INP_INFO_WUNLOCK(&V_udbinfo);
1405}
1406
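/*
 * pru_attach handler: reserve socket buffer space and allocate the inpcb
 * and its companion udpcb for a newly created UDP socket.
 */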
1407static int
1408udp_attach(struct socket *so, int proto, struct thread *td)
1409{
1410 struct inpcb *inp;
1411 int error;
1412
1413 inp = sotoinpcb(so);
1414 KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1415 error = soreserve(so, udp_sendspace, udp_recvspace);
1416 if (error)
1417 return (error);
1418 INP_INFO_WLOCK(&V_udbinfo);
1419 error = in_pcballoc(so, &V_udbinfo);
1420 if (error) {
1421 INP_INFO_WUNLOCK(&V_udbinfo);
1422 return (error);
1423 }
1424
1425 inp = sotoinpcb(so);
1426 inp->inp_vflag |= INP_IPV4;
1427 inp->inp_ip_ttl = V_ip_defttl;
1428
1429 error = udp_newudpcb(inp);
1430 if (error) {
1431 in_pcbdetach(inp);
1432 in_pcbfree(inp);
1433 INP_INFO_WUNLOCK(&V_udbinfo);
1434 return (error);
1435 }
1436
1437 INP_WUNLOCK(inp);
1438 INP_INFO_WUNLOCK(&V_udbinfo);
1439 return (0);
1440}
1441
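/*
 * Install a kernel tunneling callback on a UDP socket; received datagrams
 * are then handed to the callback instead of being appended to the socket
 * buffer.  Fails with EBUSY if a callback is already installed.
 */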
1442int
1443udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1444{
1445 struct inpcb *inp;
1446 struct udpcb *up;
1447
1448 KASSERT(so->so_type == SOCK_DGRAM,
1449 ("udp_set_kernel_tunneling: !dgram"));
1450 inp = sotoinpcb(so);
1451 KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1452 INP_WLOCK(inp);
1453 up = intoudpcb(inp);
1454 if (up->u_tun_func != NULL) {
1455 INP_WUNLOCK(inp);
1456 return (EBUSY);
1457 }
1458 up->u_tun_func = f;
1459 INP_WUNLOCK(inp);
1460 return (0);
1461}
1462
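/*
 * pru_bind handler: bind a local address/port via in_pcbbind() under the
 * global pcbinfo write lock.
 */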
1463static int
1464udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1465{
1466 struct inpcb *inp;
1467 int error;
1468
1469 inp = sotoinpcb(so);
1470 KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1471 INP_INFO_WLOCK(&V_udbinfo);
1472 INP_WLOCK(inp);
1473 error = in_pcbbind(inp, nam, td->td_ucred);
1474 INP_WUNLOCK(inp);
1475 INP_INFO_WUNLOCK(&V_udbinfo);
1476 return (error);
1477}
1478
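/*
 * pru_close handler: identical to abort for UDP; drop any connected state.
 */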
1479static void
1480udp_close(struct socket *so)
1481{
1482 struct inpcb *inp;
1483
1484 inp = sotoinpcb(so);
1485 KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1486 INP_INFO_WLOCK(&V_udbinfo);
1487 INP_WLOCK(inp);
1488 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1489 in_pcbdisconnect(inp);
1490 inp->inp_laddr.s_addr = INADDR_ANY;
1491 soisdisconnected(so);
1492 }
1493 INP_WUNLOCK(inp);
1494 INP_INFO_WUNLOCK(&V_udbinfo);
1495}
1496
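/*
 * pru_connect handler: connect to a foreign address/port.  The destination
 * may first be rewritten by the jail code (prison_remote_ip4()).
 */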
1497static int
1498udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1499{
1500 struct inpcb *inp;
1501 int error;
1502 struct sockaddr_in *sin;
1503
1504 inp = sotoinpcb(so);
1505 KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1506 INP_INFO_WLOCK(&V_udbinfo);
1507 INP_WLOCK(inp);
1508 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1509 INP_WUNLOCK(inp);
1510 INP_INFO_WUNLOCK(&V_udbinfo);
1511 return (EISCONN);
1512 }
1513 sin = (struct sockaddr_in *)nam;
1514 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1515 if (error != 0) {
1516 INP_WUNLOCK(inp);
1517 INP_INFO_WUNLOCK(&V_udbinfo);
1518 return (error);
1519 }
1520 error = in_pcbconnect(inp, nam, td->td_ucred);
1521 if (error == 0)
1522 soisconnected(so);
1523 INP_WUNLOCK(inp);
1524 INP_INFO_WUNLOCK(&V_udbinfo);
1525 return (error);
1526}
1527
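/*
 * pru_detach handler: tear down the inpcb and free the companion udpcb.
 */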
1528static void
1529udp_detach(struct socket *so)
1530{
1531 struct inpcb *inp;
1532 struct udpcb *up;
1533
1534 inp = sotoinpcb(so);
1535 KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1536 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1537 ("udp_detach: not disconnected"));
1538 INP_INFO_WLOCK(&V_udbinfo);
1539 INP_WLOCK(inp);
1540 up = intoudpcb(inp);
1541 KASSERT(up != NULL, ("%s: up == NULL", __func__));
1542 inp->inp_ppcb = NULL;
1543 in_pcbdetach(inp);
1544 in_pcbfree(inp);
1545 INP_INFO_WUNLOCK(&V_udbinfo);
1546 udp_discardcb(up);
1547}
1548
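/*
 * pru_disconnect handler: clear the connected foreign address, reset the
 * local address to INADDR_ANY, and clear SS_ISCONNECTED.
 */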
1549static int
1550udp_disconnect(struct socket *so)
1551{
1552 struct inpcb *inp;
1553
1554 inp = sotoinpcb(so);
1555 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1556 INP_INFO_WLOCK(&V_udbinfo);
1557 INP_WLOCK(inp);
1558 if (inp->inp_faddr.s_addr == INADDR_ANY) {
1559 INP_WUNLOCK(inp);
1560 INP_INFO_WUNLOCK(&V_udbinfo);
1561 return (ENOTCONN);
1562 }
1563
1564 in_pcbdisconnect(inp);
1565 inp->inp_laddr.s_addr = INADDR_ANY;
1566 SOCK_LOCK(so);
1567 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1568 SOCK_UNLOCK(so);
1569 INP_WUNLOCK(inp);
1570 INP_INFO_WUNLOCK(&V_udbinfo);
1571 return (0);
1572}
1573
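/*
 * pru_send handler: all work is done by udp_output(), which handles
 * locking and address selection.
 */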
1574static int
1575udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1576 struct mbuf *control, struct thread *td)
1577{
1578 struct inpcb *inp;
1579
1580 inp = sotoinpcb(so);
1581 KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1582 return (udp_output(inp, m, addr, control, td));
1583}
1584
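/*
 * pru_shutdown handler: UDP has no connection state to tear down, so just
 * disallow further sends.
 */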
1585int
1586udp_shutdown(struct socket *so)
1587{
1588 struct inpcb *inp;
1589
1590 inp = sotoinpcb(so);
1591 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1592 INP_WLOCK(inp);
1593 socantsendmore(so);
1594 INP_WUNLOCK(inp);
1595 return (0);
1596}
1597
1598struct pr_usrreqs udp_usrreqs = {
1599 .pru_abort = udp_abort,
1600 .pru_attach = udp_attach,
1601 .pru_bind = udp_bind,
1602 .pru_connect = udp_connect,
1603 .pru_control = in_control,
1604 .pru_detach = udp_detach,
1605 .pru_disconnect = udp_disconnect,
1606 .pru_peeraddr = in_getpeeraddr,
1607 .pru_send = udp_send,
1608 .pru_soreceive = soreceive_dgram,
1609 .pru_sosend = sosend_dgram,
1610 .pru_shutdown = udp_shutdown,
1611 .pru_sockaddr = in_getsockaddr,
1612 .pru_sosetlabel = in_pcbsosetlabel,
1613 .pru_close = udp_close,
1614};
180 in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
181 "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE);
182 V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
183 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
184 uma_zone_set_max(V_udpcb_zone, maxsockets);
185 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
186 EVENTHANDLER_PRI_ANY);
187}
188
189/*
190 * Kernel module interface for updating udpstat. The argument is an index
191 * into udpstat treated as an array of u_long. While this encodes the
192 * general layout of udpstat into the caller, it doesn't encode its location,
193 * so that future changes to add, for example, per-CPU stats support won't
194 * cause binary compatibility problems for kernel modules.
195 */
196void
197kmod_udpstat_inc(int statnum)
198{
199
200 (*((u_long *)&V_udpstat + statnum))++;
201}
202
203int
204udp_newudpcb(struct inpcb *inp)
205{
206 struct udpcb *up;
207
208 up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
209 if (up == NULL)
210 return (ENOBUFS);
211 inp->inp_ppcb = up;
212 return (0);
213}
214
215void
216udp_discardcb(struct udpcb *up)
217{
218
219 uma_zfree(V_udpcb_zone, up);
220}
221
222#ifdef VIMAGE
223void
224udp_destroy(void)
225{
226
227 in_pcbinfo_destroy(&V_udbinfo);
228 uma_zdestroy(V_udpcb_zone);
229}
230#endif
231
232/*
233 * Subroutine of udp_input(), which appends the provided mbuf chain to the
234 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
235 * contains the source address. If the socket ends up being an IPv6 socket,
236 * udp_append() will convert to a sockaddr_in6 before passing the address
237 * into the socket code.
238 */
239static void
240udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
241 struct sockaddr_in *udp_in)
242{
243 struct sockaddr *append_sa;
244 struct socket *so;
245 struct mbuf *opts = 0;
246#ifdef INET6
247 struct sockaddr_in6 udp_in6;
248#endif
249#ifdef IPSEC
250#ifdef IPSEC_NAT_T
251#ifdef INET
252 struct udpcb *up;
253#endif
254#endif
255#endif
256
257 INP_RLOCK_ASSERT(inp);
258
259#ifdef IPSEC
260 /* Check AH/ESP integrity. */
261 if (ipsec4_in_reject(n, inp)) {
262 m_freem(n);
263 V_ipsec4stat.in_polvio++;
264 return;
265 }
266#ifdef IPSEC_NAT_T
267#ifdef INET
268 up = intoudpcb(inp);
269 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
270 if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
271 n = udp4_espdecap(inp, n, off);
272 if (n == NULL) /* Consumed. */
273 return;
274 }
275#endif /* INET */
276#endif /* IPSEC_NAT_T */
277#endif /* IPSEC */
278#ifdef MAC
279 if (mac_inpcb_check_deliver(inp, n) != 0) {
280 m_freem(n);
281 return;
282 }
283#endif
284 if (inp->inp_flags & INP_CONTROLOPTS ||
285 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
286#ifdef INET6
287 if (inp->inp_vflag & INP_IPV6)
288 (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
289 else
290#endif
291 ip_savecontrol(inp, &opts, ip, n);
292 }
293#ifdef INET6
294 if (inp->inp_vflag & INP_IPV6) {
295 bzero(&udp_in6, sizeof(udp_in6));
296 udp_in6.sin6_len = sizeof(udp_in6);
297 udp_in6.sin6_family = AF_INET6;
298 in6_sin_2_v4mapsin6(udp_in, &udp_in6);
299 append_sa = (struct sockaddr *)&udp_in6;
300 } else
301#endif
302 append_sa = (struct sockaddr *)udp_in;
303 m_adj(n, off);
304
305 so = inp->inp_socket;
306 SOCKBUF_LOCK(&so->so_rcv);
307 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
308 SOCKBUF_UNLOCK(&so->so_rcv);
309 m_freem(n);
310 if (opts)
311 m_freem(opts);
312 UDPSTAT_INC(udps_fullsock);
313 } else
314 sorwakeup_locked(so);
315}
316
317void
318udp_input(struct mbuf *m, int off)
319{
320 int iphlen = off;
321 struct ip *ip;
322 struct udphdr *uh;
323 struct ifnet *ifp;
324 struct inpcb *inp;
325 struct udpcb *up;
326 int len;
327 struct ip save_ip;
328 struct sockaddr_in udp_in;
329#ifdef IPFIREWALL_FORWARD
330 struct m_tag *fwd_tag;
331#endif
332
333 ifp = m->m_pkthdr.rcvif;
334 UDPSTAT_INC(udps_ipackets);
335
336 /*
337 * Strip IP options, if any; should skip this, make available to
338 * user, and use on returned packets, but we don't yet have a way to
339 * check the checksum with options still present.
340 */
341 if (iphlen > sizeof (struct ip)) {
342 ip_stripoptions(m, (struct mbuf *)0);
343 iphlen = sizeof(struct ip);
344 }
345
346 /*
347 * Get IP and UDP header together in first mbuf.
348 */
349 ip = mtod(m, struct ip *);
350 if (m->m_len < iphlen + sizeof(struct udphdr)) {
351 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
352 UDPSTAT_INC(udps_hdrops);
353 return;
354 }
355 ip = mtod(m, struct ip *);
356 }
357 uh = (struct udphdr *)((caddr_t)ip + iphlen);
358
359 /*
360 * Destination port of 0 is illegal, based on RFC768.
361 */
362 if (uh->uh_dport == 0)
363 goto badunlocked;
364
365 /*
366 * Construct sockaddr format source address. Stuff source address
367 * and datagram in user buffer.
368 */
369 bzero(&udp_in, sizeof(udp_in));
370 udp_in.sin_len = sizeof(udp_in);
371 udp_in.sin_family = AF_INET;
372 udp_in.sin_port = uh->uh_sport;
373 udp_in.sin_addr = ip->ip_src;
374
375 /*
376 * Make mbuf data length reflect UDP length. If not enough data to
377 * reflect UDP length, drop.
378 */
379 len = ntohs((u_short)uh->uh_ulen);
380 if (ip->ip_len != len) {
381 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
382 UDPSTAT_INC(udps_badlen);
383 goto badunlocked;
384 }
385 m_adj(m, len - ip->ip_len);
386 /* ip->ip_len = len; */
387 }
388
389 /*
390 * Save a copy of the IP header in case we want restore it for
391 * sending an ICMP error message in response.
392 */
393 if (!V_udp_blackhole)
394 save_ip = *ip;
395 else
396 memset(&save_ip, 0, sizeof(save_ip));
397
398 /*
399 * Checksum extended UDP header and data.
400 */
401 if (uh->uh_sum) {
402 u_short uh_sum;
403
404 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
405 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
406 uh_sum = m->m_pkthdr.csum_data;
407 else
408 uh_sum = in_pseudo(ip->ip_src.s_addr,
409 ip->ip_dst.s_addr, htonl((u_short)len +
410 m->m_pkthdr.csum_data + IPPROTO_UDP));
411 uh_sum ^= 0xffff;
412 } else {
413 char b[9];
414
415 bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
416 bzero(((struct ipovly *)ip)->ih_x1, 9);
417 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
418 uh_sum = in_cksum(m, len + sizeof (struct ip));
419 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
420 }
421 if (uh_sum) {
422 UDPSTAT_INC(udps_badsum);
423 m_freem(m);
424 return;
425 }
426 } else
427 UDPSTAT_INC(udps_nosum);
428
429#ifdef IPFIREWALL_FORWARD
430 /*
431 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
432 */
433 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
434 if (fwd_tag != NULL) {
435 struct sockaddr_in *next_hop;
436
437 /*
438 * Do the hack: rewrite the destination address and port to the
438 * next hop supplied by the ipfw fwd tag.
439 */
440 next_hop = (struct sockaddr_in *)(fwd_tag + 1);
441 ip->ip_dst = next_hop->sin_addr;
442 uh->uh_dport = ntohs(next_hop->sin_port);
443
444 /*
445 * Remove the tag from the packet. We don't need it anymore.
446 */
447 m_tag_delete(m, fwd_tag);
448 }
449#endif
450
451 INP_INFO_RLOCK(&V_udbinfo);
452 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
453 in_broadcast(ip->ip_dst, ifp)) {
454 struct inpcb *last;
455 struct ip_moptions *imo;
456
457 last = NULL;
458 LIST_FOREACH(inp, &V_udb, inp_list) {
459 if (inp->inp_lport != uh->uh_dport)
460 continue;
461#ifdef INET6
462 if ((inp->inp_vflag & INP_IPV4) == 0)
463 continue;
464#endif
465 if (inp->inp_laddr.s_addr != INADDR_ANY &&
466 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
467 continue;
468 if (inp->inp_faddr.s_addr != INADDR_ANY &&
469 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
470 continue;
471 if (inp->inp_fport != 0 &&
472 inp->inp_fport != uh->uh_sport)
473 continue;
474
475 INP_RLOCK(inp);
476
477 /*
478 * Handle socket delivery policy for any-source
479 * and source-specific multicast. [RFC3678]
480 */
481 imo = inp->inp_moptions;
482 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
483 imo != NULL) {
484 struct sockaddr_in group;
485 int blocked;
486
487 bzero(&group, sizeof(struct sockaddr_in));
488 group.sin_len = sizeof(struct sockaddr_in);
489 group.sin_family = AF_INET;
490 group.sin_addr = ip->ip_dst;
491
492 blocked = imo_multi_filter(imo, ifp,
493 (struct sockaddr *)&group,
494 (struct sockaddr *)&udp_in);
495 if (blocked != MCAST_PASS) {
496 if (blocked == MCAST_NOTGMEMBER)
497 IPSTAT_INC(ips_notmember);
498 if (blocked == MCAST_NOTSMEMBER ||
499 blocked == MCAST_MUTED)
500 UDPSTAT_INC(udps_filtermcast);
501 INP_RUNLOCK(inp);
502 continue;
503 }
504 }
505 if (last != NULL) {
506 struct mbuf *n;
507
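/*
 * Deliver a copy of the datagram to the previously matched
 * socket ('last'); the original mbuf is kept for the final
 * match below so the common single-socket case avoids a copy.
 */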
508 n = m_copy(m, 0, M_COPYALL);
509 up = intoudpcb(last);
510 if (up->u_tun_func == NULL) {
511 if (n != NULL)
512 udp_append(last,
513 ip, n,
514 iphlen +
515 sizeof(struct udphdr),
516 &udp_in);
517 } else {
518 /*
519 * Engage the tunneling protocol; we
520 * will have to leave the pcbinfo lock
521 * held, since we are hunting through
522 * multiple UDP inpcbs.
523 */
524
525 (*up->u_tun_func)(n, iphlen, last);
526 }
527 INP_RUNLOCK(last);
528 }
529 last = inp;
530 /*
531 * Don't look for additional matches if this one does
532 * not have either the SO_REUSEPORT or SO_REUSEADDR
533 * socket options set. This heuristic avoids
534 * searching through all pcbs in the common case of a
535 * non-shared port. It assumes that an application
536 * will never clear these options after setting them.
537 */
538 if ((last->inp_socket->so_options &
539 (SO_REUSEPORT|SO_REUSEADDR)) == 0)
540 break;
541 }
542
543 if (last == NULL) {
544 /*
545 * No matching pcb found; discard datagram. (No need
546 * to send an ICMP Port Unreachable for a broadcast
547 * or multicast datagram.)
548 */
549 UDPSTAT_INC(udps_noportbcast);
550 goto badheadlocked;
551 }
552 up = intoudpcb(last);
553 if (up->u_tun_func == NULL) {
554 udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
555 &udp_in);
556 } else {
557 /*
558 * Engage the tunneling protocol.
559 */
560 (*up->u_tun_func)(m, iphlen, last);
561 }
562 INP_RUNLOCK(last);
563 INP_INFO_RUNLOCK(&V_udbinfo);
564 return;
565 }
566
567 /*
568 * Locate pcb for datagram.
569 */
570 inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport,
571 ip->ip_dst, uh->uh_dport, 1, ifp);
572 if (inp == NULL) {
573 if (udp_log_in_vain) {
574 char buf[4*sizeof "123"];
575
576 strcpy(buf, inet_ntoa(ip->ip_dst));
577 log(LOG_INFO,
578 "Connection attempt to UDP %s:%d from %s:%d\n",
579 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
580 ntohs(uh->uh_sport));
581 }
582 UDPSTAT_INC(udps_noport);
583 if (m->m_flags & (M_BCAST | M_MCAST)) {
584 UDPSTAT_INC(udps_noportbcast);
585 goto badheadlocked;
586 }
587 if (V_udp_blackhole)
588 goto badheadlocked;
589 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
590 goto badheadlocked;
591 *ip = save_ip;
592 ip->ip_len += iphlen;
593 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
594 INP_INFO_RUNLOCK(&V_udbinfo);
595 return;
596 }
597
598 /*
599 * Check the minimum TTL for socket.
600 */
601 INP_RLOCK(inp);
602 INP_INFO_RUNLOCK(&V_udbinfo);
603 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
604 INP_RUNLOCK(inp);
605 goto badunlocked;
606 }
607 up = intoudpcb(inp);
608 if (up->u_tun_func == NULL) {
609 udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in);
610 } else {
611 /*
612 * Engage the tunneling protocol.
613 */
614
615 (*up->u_tun_func)(m, iphlen, inp);
616 }
617 INP_RUNLOCK(inp);
618 return;
619
620badheadlocked:
621 if (inp)
622 INP_RUNLOCK(inp);
623 INP_INFO_RUNLOCK(&V_udbinfo);
624badunlocked:
625 m_freem(m);
626}
627
628/*
629 * Notify a udp user of an asynchronous error; just wake up so that they can
630 * collect error status.
631 */
632struct inpcb *
633udp_notify(struct inpcb *inp, int errno)
634{
635
636 /*
637 * While udp_ctlinput() always calls udp_notify() with a read lock
638 * when invoking it directly, in_pcbnotifyall() currently uses write
639 * locks due to sharing code with TCP. For now, accept either a read
640 * or a write lock, but a read lock is sufficient.
641 */
642 INP_LOCK_ASSERT(inp);
643
644 inp->inp_socket->so_error = errno;
645 sorwakeup(inp->inp_socket);
646 sowwakeup(inp->inp_socket);
647 return (inp);
648}
649
650void
651udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
652{
653 struct ip *ip = vip;
654 struct udphdr *uh;
655 struct in_addr faddr;
656 struct inpcb *inp;
657
658 faddr = ((struct sockaddr_in *)sa)->sin_addr;
659 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
660 return;
661
662 /*
663 * Redirects don't need to be handled up here.
664 */
665 if (PRC_IS_REDIRECT(cmd))
666 return;
667
668 /*
669 * Hostdead is ugly because it goes linearly through all PCBs.
670 *
671 * XXX: We never get this from ICMP; otherwise it would make an
672 * excellent DoS attack on machines with many connections.
673 */
674 if (cmd == PRC_HOSTDEAD)
675 ip = NULL;
676 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
677 return;
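/*
 * With a full header, look up and notify the single matching pcb;
 * for PRC_HOSTDEAD (ip cleared above), notify every pcb connected
 * to faddr.
 */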
678 if (ip != NULL) {
679 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
680 INP_INFO_RLOCK(&V_udbinfo);
681 inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport,
682 ip->ip_src, uh->uh_sport, 0, NULL);
683 if (inp != NULL) {
684 INP_RLOCK(inp);
685 if (inp->inp_socket != NULL) {
686 udp_notify(inp, inetctlerrmap[cmd]);
687 }
688 INP_RUNLOCK(inp);
689 }
690 INP_INFO_RUNLOCK(&V_udbinfo);
691 } else
692 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
693 udp_notify);
694}
695
696static int
697udp_pcblist(SYSCTL_HANDLER_ARGS)
698{
699 int error, i, n;
700 struct inpcb *inp, **inp_list;
701 inp_gen_t gencnt;
702 struct xinpgen xig;
703
704 /*
705 * The process of preparing the PCB list is too time-consuming and
706 * resource-intensive to repeat twice on every request.
707 */
708 if (req->oldptr == 0) {
709 n = V_udbinfo.ipi_count;
710 req->oldidx = 2 * (sizeof xig)
711 + (n + n/8) * sizeof(struct xinpcb);
712 return (0);
713 }
714
715 if (req->newptr != 0)
716 return (EPERM);
717
718 /*
719 * OK, now we're committed to doing something.
720 */
721 INP_INFO_RLOCK(&V_udbinfo);
722 gencnt = V_udbinfo.ipi_gencnt;
723 n = V_udbinfo.ipi_count;
724 INP_INFO_RUNLOCK(&V_udbinfo);
725
726 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
727 + n * sizeof(struct xinpcb));
728 if (error != 0)
729 return (error);
730
731 xig.xig_len = sizeof xig;
732 xig.xig_count = n;
733 xig.xig_gen = gencnt;
734 xig.xig_sogen = so_gencnt;
735 error = SYSCTL_OUT(req, &xig, sizeof xig);
736 if (error)
737 return (error);
738
739 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
740 if (inp_list == 0)
741 return (ENOMEM);
742
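/*
 * Take a reference on each inpcb the caller's credential may see;
 * the references keep the pcbs valid after the global lock is
 * dropped for the copy-out below.
 */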
743 INP_INFO_RLOCK(&V_udbinfo);
744 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
745 inp = LIST_NEXT(inp, inp_list)) {
746 INP_WLOCK(inp);
747 if (inp->inp_gencnt <= gencnt &&
748 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
749 in_pcbref(inp);
750 inp_list[i++] = inp;
751 }
752 INP_WUNLOCK(inp);
753 }
754 INP_INFO_RUNLOCK(&V_udbinfo);
755 n = i;
756
757 error = 0;
758 for (i = 0; i < n; i++) {
759 inp = inp_list[i];
760 INP_RLOCK(inp);
761 if (inp->inp_gencnt <= gencnt) {
762 struct xinpcb xi;
763
764 bzero(&xi, sizeof(xi));
765 xi.xi_len = sizeof xi;
766 /* XXX should avoid extra copy */
767 bcopy(inp, &xi.xi_inp, sizeof *inp);
768 if (inp->inp_socket)
769 sotoxsocket(inp->inp_socket, &xi.xi_socket);
770 xi.xi_inp.inp_gencnt = inp->inp_gencnt;
771 INP_RUNLOCK(inp);
772 error = SYSCTL_OUT(req, &xi, sizeof xi);
773 } else
774 INP_RUNLOCK(inp);
775 }
776 INP_INFO_WLOCK(&V_udbinfo);
777 for (i = 0; i < n; i++) {
778 inp = inp_list[i];
779 INP_WLOCK(inp);
780 if (!in_pcbrele(inp))
781 INP_WUNLOCK(inp);
782 }
783 INP_INFO_WUNLOCK(&V_udbinfo);
784
785 if (!error) {
786 /*
787 * Give the user an updated idea of our state. If the
788 * generation differs from what we told her before, she knows
789 * that something happened while we were processing this
790 * request, and it might be necessary to retry.
791 */
792 INP_INFO_RLOCK(&V_udbinfo);
793 xig.xig_gen = V_udbinfo.ipi_gencnt;
794 xig.xig_sogen = so_gencnt;
795 xig.xig_count = V_udbinfo.ipi_count;
796 INP_INFO_RUNLOCK(&V_udbinfo);
797 error = SYSCTL_OUT(req, &xig, sizeof xig);
798 }
799 free(inp_list, M_TEMP);
800 return (error);
801}
802
803SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
804 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
805
806static int
807udp_getcred(SYSCTL_HANDLER_ARGS)
808{
809 struct xucred xuc;
810 struct sockaddr_in addrs[2];
811 struct inpcb *inp;
812 int error;
813
814 error = priv_check(req->td, PRIV_NETINET_GETCRED);
815 if (error)
816 return (error);
817 error = SYSCTL_IN(req, addrs, sizeof(addrs));
818 if (error)
819 return (error);
820 INP_INFO_RLOCK(&V_udbinfo);
821 inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
822 addrs[0].sin_addr, addrs[0].sin_port, 1, NULL);
823 if (inp != NULL) {
824 INP_RLOCK(inp);
825 INP_INFO_RUNLOCK(&V_udbinfo);
826 if (inp->inp_socket == NULL)
827 error = ENOENT;
828 if (error == 0)
829 error = cr_canseeinpcb(req->td->td_ucred, inp);
830 if (error == 0)
831 cru2x(inp->inp_cred, &xuc);
832 INP_RUNLOCK(inp);
833 } else {
834 INP_INFO_RUNLOCK(&V_udbinfo);
835 error = ENOENT;
836 }
837 if (error == 0)
838 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
839 return (error);
840}
841
842SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
843 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
844 udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
845
846int
847udp_ctloutput(struct socket *so, struct sockopt *sopt)
848{
849 int error = 0, optval;
850 struct inpcb *inp;
851#ifdef IPSEC_NAT_T
852 struct udpcb *up;
853#endif
854
855 inp = sotoinpcb(so);
856 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
857 INP_WLOCK(inp);
858 if (sopt->sopt_level != IPPROTO_UDP) {
859#ifdef INET6
860 if (INP_CHECK_SOCKAF(so, AF_INET6)) {
861 INP_WUNLOCK(inp);
862 error = ip6_ctloutput(so, sopt);
863 } else {
864#endif
865 INP_WUNLOCK(inp);
866 error = ip_ctloutput(so, sopt);
867#ifdef INET6
868 }
869#endif
870 return (error);
871 }
872
873 switch (sopt->sopt_dir) {
874 case SOPT_SET:
875 switch (sopt->sopt_name) {
876 case UDP_ENCAP:
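/*
 * Release the inpcb lock across sooptcopyin(), which may sleep
 * during the copyin, then re-acquire it before updating the
 * udpcb flags.
 */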
877 INP_WUNLOCK(inp);
878 error = sooptcopyin(sopt, &optval, sizeof optval,
879 sizeof optval);
880 if (error)
881 break;
882 inp = sotoinpcb(so);
883 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
884 INP_WLOCK(inp);
885#ifdef IPSEC_NAT_T
886 up = intoudpcb(inp);
887 KASSERT(up != NULL, ("%s: up == NULL", __func__));
888#endif
889 switch (optval) {
890 case 0:
891 /* Clear all UDP encap. */
892#ifdef IPSEC_NAT_T
893 up->u_flags &= ~UF_ESPINUDP_ALL;
894#endif
895 break;
896#ifdef IPSEC_NAT_T
897 case UDP_ENCAP_ESPINUDP:
898 case UDP_ENCAP_ESPINUDP_NON_IKE:
899 up->u_flags &= ~UF_ESPINUDP_ALL;
900 if (optval == UDP_ENCAP_ESPINUDP)
901 up->u_flags |= UF_ESPINUDP;
902 else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
903 up->u_flags |= UF_ESPINUDP_NON_IKE;
904 break;
905#endif
906 default:
907 error = EINVAL;
908 break;
909 }
910 INP_WUNLOCK(inp);
911 break;
912 default:
913 INP_WUNLOCK(inp);
914 error = ENOPROTOOPT;
915 break;
916 }
917 break;
918 case SOPT_GET:
919 switch (sopt->sopt_name) {
920#ifdef IPSEC_NAT_T
921 case UDP_ENCAP:
922 up = intoudpcb(inp);
923 KASSERT(up != NULL, ("%s: up == NULL", __func__));
924 optval = up->u_flags & UF_ESPINUDP_ALL;
925 INP_WUNLOCK(inp);
926 error = sooptcopyout(sopt, &optval, sizeof optval);
927 break;
928#endif
929 default:
930 INP_WUNLOCK(inp);
931 error = ENOPROTOOPT;
932 break;
933 }
934 break;
935 }
936 return (error);
937}
938
939static int
940udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
941 struct mbuf *control, struct thread *td)
942{
943 struct udpiphdr *ui;
944 int len = m->m_pkthdr.len;
945 struct in_addr faddr, laddr;
946 struct cmsghdr *cm;
947 struct sockaddr_in *sin, src;
948 int error = 0;
949 int ipflags;
950 u_short fport, lport;
951 int unlock_udbinfo;
952
953 /*
954 * udp_output() may need to temporarily bind or connect the current
955 * inpcb. As such, we don't know up front whether we will need the
956 * pcbinfo lock or not. Do any work to decide what is needed up
957 * front before acquiring any locks.
958 */
959 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
960 if (control)
961 m_freem(control);
962 m_freem(m);
963 return (EMSGSIZE);
964 }
965
966 src.sin_family = 0;
967 if (control != NULL) {
968 /*
969 * XXX: Currently, we assume all the optional information is
970 * stored in a single mbuf.
971 */
972 if (control->m_next) {
973 m_freem(control);
974 m_freem(m);
975 return (EINVAL);
976 }
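/*
 * Walk the cmsghdr chain in the control mbuf; only IP_SENDSRCADDR
 * at level IPPROTO_IP is handled here, and any other IPPROTO_IP
 * option is rejected with ENOPROTOOPT.
 */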
977 for (; control->m_len > 0;
978 control->m_data += CMSG_ALIGN(cm->cmsg_len),
979 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
980 cm = mtod(control, struct cmsghdr *);
981 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
982 || cm->cmsg_len > control->m_len) {
983 error = EINVAL;
984 break;
985 }
986 if (cm->cmsg_level != IPPROTO_IP)
987 continue;
988
989 switch (cm->cmsg_type) {
990 case IP_SENDSRCADDR:
991 if (cm->cmsg_len !=
992 CMSG_LEN(sizeof(struct in_addr))) {
993 error = EINVAL;
994 break;
995 }
996 bzero(&src, sizeof(src));
997 src.sin_family = AF_INET;
998 src.sin_len = sizeof(src);
999 src.sin_port = inp->inp_lport;
1000 src.sin_addr =
1001 *(struct in_addr *)CMSG_DATA(cm);
1002 break;
1003
1004 default:
1005 error = ENOPROTOOPT;
1006 break;
1007 }
1008 if (error)
1009 break;
1010 }
1011 m_freem(control);
1012 }
1013 if (error) {
1014 m_freem(m);
1015 return (error);
1016 }
1017
1018 /*
1019 * Depending on whether or not the application has bound or connected
1020 * the socket, we may have to do varying levels of work. The optimal
1021 * case is for a connected UDP socket, as a global lock isn't
1022 * required at all.
1023 *
1024 * In order to decide which we need, we require stability of the
1025 * inpcb binding, which we ensure by acquiring a read lock on the
1026 * inpcb. This doesn't strictly follow the lock order, so we play
1027 * the trylock and retry game; note that we may end up with more
1028 * conservative locks than required the second time around, so later
1029 * assertions have to accept that. Further analysis of the number of
1030 * misses under contention is required.
1031 */
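/*
 * unlock_udbinfo records which locks were taken: 2 - pcbinfo write
 * lock and inpcb write lock; 1 - pcbinfo read lock and inpcb read
 * lock; 0 - inpcb read lock only.
 */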
1032 sin = (struct sockaddr_in *)addr;
1033 INP_RLOCK(inp);
1034 if (sin != NULL &&
1035 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1036 INP_RUNLOCK(inp);
1037 INP_INFO_WLOCK(&V_udbinfo);
1038 INP_WLOCK(inp);
1039 unlock_udbinfo = 2;
1040 } else if ((sin != NULL && (
1041 (sin->sin_addr.s_addr == INADDR_ANY) ||
1042 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1043 (inp->inp_laddr.s_addr == INADDR_ANY) ||
1044 (inp->inp_lport == 0))) ||
1045 (src.sin_family == AF_INET)) {
1046 if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) {
1047 INP_RUNLOCK(inp);
1048 INP_INFO_RLOCK(&V_udbinfo);
1049 INP_RLOCK(inp);
1050 }
1051 unlock_udbinfo = 1;
1052 } else
1053 unlock_udbinfo = 0;
1054
1055 /*
1056 * If the IP_SENDSRCADDR control message was specified, override the
1057 * source address for this datagram. Its use is invalidated if the
1058 * address thus specified is incomplete or clobbers other inpcbs.
1059 */
1060 laddr = inp->inp_laddr;
1061 lport = inp->inp_lport;
1062 if (src.sin_family == AF_INET) {
1063 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1064 if ((lport == 0) ||
1065 (laddr.s_addr == INADDR_ANY &&
1066 src.sin_addr.s_addr == INADDR_ANY)) {
1067 error = EINVAL;
1068 goto release;
1069 }
1070 error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1071 &laddr.s_addr, &lport, td->td_ucred);
1072 if (error)
1073 goto release;
1074 }
1075
1076 /*
1077 * If a UDP socket has been connected, then a local address/port will
1078 * have been selected and bound.
1079 *
1080 * If a UDP socket has not been connected to, then an explicit
1081 * destination address must be used, in which case a local
1082 * address/port may not have been selected and bound.
1083 */
1084 if (sin != NULL) {
1085 INP_LOCK_ASSERT(inp);
1086 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1087 error = EISCONN;
1088 goto release;
1089 }
1090
1091 /*
1092 * Jail may rewrite the destination address, so let it do
1093 * that before we use it.
1094 */
1095 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1096 if (error)
1097 goto release;
1098
1099 /*
1100 * If a local address or port hasn't yet been selected, or if
1101 * the destination address needs to be rewritten due to using
1102 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1103 * to do the heavy lifting. Once a port is selected, we
1104 * commit the binding back to the socket; we also commit the
1105 * binding of the address if in jail.
1106 *
1107 * If we already have a valid binding and we're not
1108 * requesting a destination address rewrite, use a fast path.
1109 */
1110 if (inp->inp_laddr.s_addr == INADDR_ANY ||
1111 inp->inp_lport == 0 ||
1112 sin->sin_addr.s_addr == INADDR_ANY ||
1113 sin->sin_addr.s_addr == INADDR_BROADCAST) {
1114 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1115 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1116 &lport, &faddr.s_addr, &fport, NULL,
1117 td->td_ucred);
1118 if (error)
1119 goto release;
1120
1121 /*
1122 * XXXRW: Why not commit the port if the address is
1123 * !INADDR_ANY?
1124 */
1125 /* Commit the local port if newly assigned. */
1126 if (inp->inp_laddr.s_addr == INADDR_ANY &&
1127 inp->inp_lport == 0) {
1128 INP_INFO_WLOCK_ASSERT(&V_udbinfo);
1129 INP_WLOCK_ASSERT(inp);
1130 /*
1131 * Remember addr if jailed, to prevent
1132 * rebinding.
1133 */
1134 if (prison_flag(td->td_ucred, PR_IP4))
1135 inp->inp_laddr = laddr;
1136 inp->inp_lport = lport;
1137 if (in_pcbinshash(inp) != 0) {
1138 inp->inp_lport = 0;
1139 error = EAGAIN;
1140 goto release;
1141 }
1142 inp->inp_flags |= INP_ANONPORT;
1143 }
1144 } else {
1145 faddr = sin->sin_addr;
1146 fport = sin->sin_port;
1147 }
1148 } else {
1149 INP_LOCK_ASSERT(inp);
1150 faddr = inp->inp_faddr;
1151 fport = inp->inp_fport;
1152 if (faddr.s_addr == INADDR_ANY) {
1153 error = ENOTCONN;
1154 goto release;
1155 }
1156 }
1157
1158 /*
1159 * Calculate data length and get a mbuf for UDP, IP, and possible
1160 * link-layer headers. Immediately slide the data pointer forward
1161 * again, since we won't use the link-layer space at this layer.
1162 */
1163 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT);
1164 if (m == NULL) {
1165 error = ENOBUFS;
1166 goto release;
1167 }
1168 m->m_data += max_linkhdr;
1169 m->m_len -= max_linkhdr;
1170 m->m_pkthdr.len -= max_linkhdr;
1171
1172 /*
1173 * Fill in the mbuf with the extended UDP header (struct udpiphdr),
1174 * with addresses and length in network byte order.
1175 */
1176 ui = mtod(m, struct udpiphdr *);
1177 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1178 ui->ui_pr = IPPROTO_UDP;
1179 ui->ui_src = laddr;
1180 ui->ui_dst = faddr;
1181 ui->ui_sport = lport;
1182 ui->ui_dport = fport;
1183 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1184
1185 /*
1186 * Set the Don't Fragment bit in the IP header.
1187 */
1188 if (inp->inp_flags & INP_DONTFRAG) {
1189 struct ip *ip;
1190
1191 ip = (struct ip *)&ui->ui_i;
1192 ip->ip_off |= IP_DF;
1193 }
1194
1195 ipflags = 0;
1196 if (inp->inp_socket->so_options & SO_DONTROUTE)
1197 ipflags |= IP_ROUTETOIF;
1198 if (inp->inp_socket->so_options & SO_BROADCAST)
1199 ipflags |= IP_ALLOWBROADCAST;
1200 if (inp->inp_flags & INP_ONESBCAST)
1201 ipflags |= IP_SENDONES;
1202
1203#ifdef MAC
1204 mac_inpcb_create_mbuf(inp, m);
1205#endif
1206
1207 /*
1208 * Set up checksum and output datagram.
1209 */
1210 if (udp_cksum) {
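/*
 * With INP_ONESBCAST the datagram will go out to the all-ones
 * broadcast address, so compute the pseudo-header checksum
 * against INADDR_BROADCAST rather than the stored foreign address.
 */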
1211 if (inp->inp_flags & INP_ONESBCAST)
1212 faddr.s_addr = INADDR_BROADCAST;
1213 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1214 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1215 m->m_pkthdr.csum_flags = CSUM_UDP;
1216 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1217 } else
1218 ui->ui_sum = 0;
1219 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1220 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1221 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1222 UDPSTAT_INC(udps_opackets);
1223
1224 if (unlock_udbinfo == 2)
1225 INP_INFO_WUNLOCK(&V_udbinfo);
1226 else if (unlock_udbinfo == 1)
1227 INP_INFO_RUNLOCK(&V_udbinfo);
1228 error = ip_output(m, inp->inp_options, NULL, ipflags,
1229 inp->inp_moptions, inp);
1230 if (unlock_udbinfo == 2)
1231 INP_WUNLOCK(inp);
1232 else
1233 INP_RUNLOCK(inp);
1234 return (error);
1235
1236release:
1237 if (unlock_udbinfo == 2) {
1238 INP_WUNLOCK(inp);
1239 INP_INFO_WUNLOCK(&V_udbinfo);
1240 } else if (unlock_udbinfo == 1) {
1241 INP_RUNLOCK(inp);
1242 INP_INFO_RUNLOCK(&V_udbinfo);
1243 } else
1244 INP_RUNLOCK(inp);
1245 m_freem(m);
1246 return (error);
1247}
1248
1249
1250#if defined(IPSEC) && defined(IPSEC_NAT_T)
1251#ifdef INET
1252/*
1253 * Potentially decap ESP in UDP frame. Check for an ESP header
1254 * and optional marker; if present, strip the UDP header and
1255 * push the result through IPSec.
1256 *
1257 * Returns mbuf to be processed (potentially re-allocated) or
1258 * NULL if consumed and/or processed.
1259 */
1260static struct mbuf *
1261udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1262{
1263 size_t minlen, payload, skip, iphlen;
1264 caddr_t data;
1265 struct udpcb *up;
1266 struct m_tag *tag;
1267 struct udphdr *udphdr;
1268 struct ip *ip;
1269
1270 INP_RLOCK_ASSERT(inp);
1271
1272 /*
1273 * Pull up data so the longest case is contiguous:
1274 * IP/UDP hdr + non ESP marker + ESP hdr.
1275 */
1276 minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1277 if (minlen > m->m_pkthdr.len)
1278 minlen = m->m_pkthdr.len;
1279 if ((m = m_pullup(m, minlen)) == NULL) {
1280 V_ipsec4stat.in_inval++;
1281 return (NULL); /* Bypass caller processing. */
1282 }
1283 data = mtod(m, caddr_t); /* Points to ip header. */
1284 payload = m->m_len - off; /* Size of payload. */
1285
1286 if (payload == 1 && data[off] == '\xff')
1287 return (m); /* NB: keepalive packet, no decap. */
1288
1289 up = intoudpcb(inp);
1290 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1291 KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1292 ("u_flags 0x%x", up->u_flags));
1293
1294 /*
1295 * Check that the payload is large enough to hold an
1296 * ESP header and compute the amount of data to remove.
1297 *
1298 * NB: the caller has already done a pullup for us.
1299 * XXX can we assume alignment and eliminate bcopys?
1300 */
1301 if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1302 /*
1303 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1304 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1305 * possible AH mode non-IKE marker+non-ESP marker
1306 * from draft-ietf-ipsec-udp-encaps-00.txt.
1307 */
1308 uint64_t marker;
1309
1310 if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1311 return (m); /* NB: no decap. */
1312 bcopy(data + off, &marker, sizeof(uint64_t));
1313 if (marker != 0) /* Non-IKE marker. */
1314 return (m); /* NB: no decap. */
1315 skip = sizeof(uint64_t) + sizeof(struct udphdr);
1316 } else {
1317 uint32_t spi;
1318
1319 if (payload <= sizeof(struct esp)) {
1320 V_ipsec4stat.in_inval++;
1321 m_freem(m);
1322 return (NULL); /* Discard. */
1323 }
1324 bcopy(data + off, &spi, sizeof(uint32_t));
1325 if (spi == 0) /* Non-ESP marker. */
1326 return (m); /* NB: no decap. */
1327 skip = sizeof(struct udphdr);
1328 }
1329
1330 /*
1331 * Set up a PACKET_TAG_IPSEC_NAT_T_PORTS tag to remember
1332 * the UDP ports. This is required if we want to select
1333 * the right SPD for multiple hosts behind the same NAT.
1334 *
1335 * NB: ports are maintained in network byte order everywhere
1336 * in the NAT-T code.
1337 */
1338 tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1339 2 * sizeof(uint16_t), M_NOWAIT);
1340 if (tag == NULL) {
1341 V_ipsec4stat.in_nomem++;
1342 m_freem(m);
1343 return (NULL); /* Discard. */
1344 }
1345 iphlen = off - sizeof(struct udphdr);
1346 udphdr = (struct udphdr *)(data + iphlen);
1347 ((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1348 ((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1349 m_tag_prepend(m, tag);
1350
1351 /*
1352 * Remove the UDP header (and possibly the non-ESP marker).
1353 * The IP header length is iphlen.
1354 * Before:
1355 * <--- off --->
1356 * +----+------+-----+
1357 * | IP | UDP | ESP |
1358 * +----+------+-----+
1359 * <-skip->
1360 * After:
1361 * +----+-----+
1362 * | IP | ESP |
1363 * +----+-----+
1364 * <-skip->
1365 */
1366 ovbcopy(data, data + skip, iphlen);
1367 m_adj(m, skip);
1368
1369 ip = mtod(m, struct ip *);
1370 ip->ip_len -= skip;
1371 ip->ip_p = IPPROTO_ESP;
1372
1373 /*
1374 * We cannot yet update the cksums so clear any
1375 * h/w cksum flags as they are no longer valid.
1376 */
1377 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1378 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1379
1380 (void) ipsec4_common_input(m, iphlen, ip->ip_p);
1381 return (NULL); /* NB: consumed, bypass processing. */
1382}
1383#endif /* INET */
1384#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1385
1386static void
1387udp_abort(struct socket *so)
1388{
1389 struct inpcb *inp;
1390
1391 inp = sotoinpcb(so);
1392 KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1393 INP_INFO_WLOCK(&V_udbinfo);
1394 INP_WLOCK(inp);
1395 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1396 in_pcbdisconnect(inp);
1397 inp->inp_laddr.s_addr = INADDR_ANY;
1398 soisdisconnected(so);
1399 }
1400 INP_WUNLOCK(inp);
1401 INP_INFO_WUNLOCK(&V_udbinfo);
1402}
1403
1404static int
1405udp_attach(struct socket *so, int proto, struct thread *td)
1406{
1407 struct inpcb *inp;
1408 int error;
1409
1410 inp = sotoinpcb(so);
1411 KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1412 error = soreserve(so, udp_sendspace, udp_recvspace);
1413 if (error)
1414 return (error);
1415 INP_INFO_WLOCK(&V_udbinfo);
1416 error = in_pcballoc(so, &V_udbinfo);
1417 if (error) {
1418 INP_INFO_WUNLOCK(&V_udbinfo);
1419 return (error);
1420 }
1421
1422 inp = sotoinpcb(so);
1423 inp->inp_vflag |= INP_IPV4;
1424 inp->inp_ip_ttl = V_ip_defttl;
1425
1426 error = udp_newudpcb(inp);
1427 if (error) {
1428 in_pcbdetach(inp);
1429 in_pcbfree(inp);
1430 INP_INFO_WUNLOCK(&V_udbinfo);
1431 return (error);
1432 }
1433
1434 INP_WUNLOCK(inp);
1435 INP_INFO_WUNLOCK(&V_udbinfo);
1436 return (0);
1437}
1438
1439int
1440udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1441{
1442 struct inpcb *inp;
1443 struct udpcb *up;
1444
1445 KASSERT(so->so_type == SOCK_DGRAM,
1446 ("udp_set_kernel_tunneling: !dgram"));
1447 inp = sotoinpcb(so);
1448 KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1449 INP_WLOCK(inp);
1450 up = intoudpcb(inp);
1451 if (up->u_tun_func != NULL) {
1452 INP_WUNLOCK(inp);
1453 return (EBUSY);
1454 }
1455 up->u_tun_func = f;
1456 INP_WUNLOCK(inp);
1457 return (0);
1458}
1459
1460static int
1461udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1462{
1463 struct inpcb *inp;
1464 int error;
1465
1466 inp = sotoinpcb(so);
1467 KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1468 INP_INFO_WLOCK(&V_udbinfo);
1469 INP_WLOCK(inp);
1470 error = in_pcbbind(inp, nam, td->td_ucred);
1471 INP_WUNLOCK(inp);
1472 INP_INFO_WUNLOCK(&V_udbinfo);
1473 return (error);
1474}
1475
1476static void
1477udp_close(struct socket *so)
1478{
1479 struct inpcb *inp;
1480
1481 inp = sotoinpcb(so);
1482 KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1483 INP_INFO_WLOCK(&V_udbinfo);
1484 INP_WLOCK(inp);
1485 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1486 in_pcbdisconnect(inp);
1487 inp->inp_laddr.s_addr = INADDR_ANY;
1488 soisdisconnected(so);
1489 }
1490 INP_WUNLOCK(inp);
1491 INP_INFO_WUNLOCK(&V_udbinfo);
1492}
1493
1494static int
1495udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1496{
1497 struct inpcb *inp;
1498 int error;
1499 struct sockaddr_in *sin;
1500
1501 inp = sotoinpcb(so);
1502 KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1503 INP_INFO_WLOCK(&V_udbinfo);
1504 INP_WLOCK(inp);
1505 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1506 INP_WUNLOCK(inp);
1507 INP_INFO_WUNLOCK(&V_udbinfo);
1508 return (EISCONN);
1509 }
1510 sin = (struct sockaddr_in *)nam;
1511 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1512 if (error != 0) {
1513 INP_WUNLOCK(inp);
1514 INP_INFO_WUNLOCK(&V_udbinfo);
1515 return (error);
1516 }
1517 error = in_pcbconnect(inp, nam, td->td_ucred);
1518 if (error == 0)
1519 soisconnected(so);
1520 INP_WUNLOCK(inp);
1521 INP_INFO_WUNLOCK(&V_udbinfo);
1522 return (error);
1523}
1524
1525static void
1526udp_detach(struct socket *so)
1527{
1528 struct inpcb *inp;
1529 struct udpcb *up;
1530
1531 inp = sotoinpcb(so);
1532 KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1533 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1534 ("udp_detach: not disconnected"));
1535 INP_INFO_WLOCK(&V_udbinfo);
1536 INP_WLOCK(inp);
1537 up = intoudpcb(inp);
1538 KASSERT(up != NULL, ("%s: up == NULL", __func__));
1539 inp->inp_ppcb = NULL;
1540 in_pcbdetach(inp);
1541 in_pcbfree(inp);
1542 INP_INFO_WUNLOCK(&V_udbinfo);
1543 udp_discardcb(up);
1544}
1545
1546static int
1547udp_disconnect(struct socket *so)
1548{
1549 struct inpcb *inp;
1550
1551 inp = sotoinpcb(so);
1552 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1553 INP_INFO_WLOCK(&V_udbinfo);
1554 INP_WLOCK(inp);
1555 if (inp->inp_faddr.s_addr == INADDR_ANY) {
1556 INP_WUNLOCK(inp);
1557 INP_INFO_WUNLOCK(&V_udbinfo);
1558 return (ENOTCONN);
1559 }
1560
1561 in_pcbdisconnect(inp);
1562 inp->inp_laddr.s_addr = INADDR_ANY;
1563 SOCK_LOCK(so);
1564 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1565 SOCK_UNLOCK(so);
1566 INP_WUNLOCK(inp);
1567 INP_INFO_WUNLOCK(&V_udbinfo);
1568 return (0);
1569}
1570
1571static int
1572udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1573 struct mbuf *control, struct thread *td)
1574{
1575 struct inpcb *inp;
1576
1577 inp = sotoinpcb(so);
1578 KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1579 return (udp_output(inp, m, addr, control, td));
1580}
1581
1582int
1583udp_shutdown(struct socket *so)
1584{
1585 struct inpcb *inp;
1586
1587 inp = sotoinpcb(so);
1588 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1589 INP_WLOCK(inp);
1590 socantsendmore(so);
1591 INP_WUNLOCK(inp);
1592 return (0);
1593}
1594
1595struct pr_usrreqs udp_usrreqs = {
1596 .pru_abort = udp_abort,
1597 .pru_attach = udp_attach,
1598 .pru_bind = udp_bind,
1599 .pru_connect = udp_connect,
1600 .pru_control = in_control,
1601 .pru_detach = udp_detach,
1602 .pru_disconnect = udp_disconnect,
1603 .pru_peeraddr = in_getpeeraddr,
1604 .pru_send = udp_send,
1605 .pru_soreceive = soreceive_dgram,
1606 .pru_sosend = sosend_dgram,
1607 .pru_shutdown = udp_shutdown,
1608 .pru_sockaddr = in_getsockaddr,
1609 .pru_sosetlabel = in_pcbsosetlabel,
1610 .pru_close = udp_close,
1611};