udp_usrreq.c (215317) vs. udp_usrreq.c (215701)
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California.
4 * Copyright (c) 2008 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 4. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
32 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/netinet/udp_usrreq.c 215317 2010-11-14 20:38:11Z dim $");
35__FBSDID("$FreeBSD: head/sys/netinet/udp_usrreq.c 215701 2010-11-22 19:32:54Z dim $");
36
37#include "opt_ipfw.h"
38#include "opt_inet6.h"
39#include "opt_ipsec.h"
40
41#include <sys/param.h>
42#include <sys/domain.h>
43#include <sys/eventhandler.h>
44#include <sys/jail.h>
45#include <sys/kernel.h>
46#include <sys/lock.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/priv.h>
50#include <sys/proc.h>
51#include <sys/protosw.h>
52#include <sys/signalvar.h>
53#include <sys/socket.h>
54#include <sys/socketvar.h>
55#include <sys/sx.h>
56#include <sys/sysctl.h>
57#include <sys/syslog.h>
58#include <sys/systm.h>
59
60#include <vm/uma.h>
61
62#include <net/if.h>
63#include <net/route.h>
64
65#include <netinet/in.h>
66#include <netinet/in_pcb.h>
67#include <netinet/in_systm.h>
68#include <netinet/in_var.h>
69#include <netinet/ip.h>
70#ifdef INET6
71#include <netinet/ip6.h>
72#endif
73#include <netinet/ip_icmp.h>
74#include <netinet/icmp_var.h>
75#include <netinet/ip_var.h>
76#include <netinet/ip_options.h>
77#ifdef INET6
78#include <netinet6/ip6_var.h>
79#endif
80#include <netinet/udp.h>
81#include <netinet/udp_var.h>
82
83#ifdef IPSEC
84#include <netipsec/ipsec.h>
85#include <netipsec/esp.h>
86#endif
87
88#include <machine/in_cksum.h>
89
90#include <security/mac/mac_framework.h>
91
92/*
93 * UDP protocol implementation.
94 * Per RFC 768, August, 1980.
95 */
96
97/*
98 * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums
99 * removes the only data integrity mechanism for packets, and malformed
100 * packets that would otherwise be discarded due to bad checksums may
101 * cause problems (especially for NFS data blocks).
102 */
103static int udp_cksum = 1;
104SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW, &udp_cksum,
105 0, "compute udp checksum");
106
107int udp_log_in_vain = 0;
108SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
109 &udp_log_in_vain, 0, "Log all incoming UDP packets");
110
111VNET_DEFINE(int, udp_blackhole) = 0;
112SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
113 &VNET_NAME(udp_blackhole), 0,
114 "Do not send port unreachables for refused connects");
115
116u_long udp_sendspace = 9216; /* really max datagram size */
117 /* 40 1K datagrams */
118SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
119 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
120
121u_long udp_recvspace = 40 * (1024 +
122#ifdef INET6
123 sizeof(struct sockaddr_in6)
124#else
125 sizeof(struct sockaddr_in)
126#endif
127 );
128
129SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
130 &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
131
132VNET_DEFINE(struct inpcbhead, udb); /* from udp_var.h */
133VNET_DEFINE(struct inpcbinfo, udbinfo);
134STATIC_VNET_DEFINE(uma_zone_t, udpcb_zone);
134static VNET_DEFINE(uma_zone_t, udpcb_zone);
135#define V_udpcb_zone VNET(udpcb_zone)
136
137#ifndef UDBHASHSIZE
138#define UDBHASHSIZE 128
139#endif
140
141VNET_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
142SYSCTL_VNET_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RW,
143 &VNET_NAME(udpstat), udpstat,
144 "UDP statistics (struct udpstat, netinet/udp_var.h)");
145
146static void udp_detach(struct socket *so);
147static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
148 struct mbuf *, struct thread *);
149#ifdef IPSEC
150#ifdef IPSEC_NAT_T
151#define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
152#ifdef INET
153static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
154#endif
155#endif /* IPSEC_NAT_T */
156#endif /* IPSEC */
157
158static void
159udp_zone_change(void *tag)
160{
161
162 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
163 uma_zone_set_max(V_udpcb_zone, maxsockets);
164}
165
166static int
167udp_inpcb_init(void *mem, int size, int flags)
168{
169 struct inpcb *inp;
170
171 inp = mem;
172 INP_LOCK_INIT(inp, "inp", "udpinp");
173 return (0);
174}
175
176void
177udp_init(void)
178{
179
180 in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
181 "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE);
182 V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
183 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
184 uma_zone_set_max(V_udpcb_zone, maxsockets);
185 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
186 EVENTHANDLER_PRI_ANY);
187}
188
189/*
190 * Kernel module interface for updating udpstat. The argument is an index
191 * into udpstat treated as an array of u_long. While this encodes the
192 * general layout of udpstat into the caller, it doesn't encode its location,
193 * so that future changes to add, for example, per-CPU stats support won't
194 * cause binary compatibility problems for kernel modules.
195 */
196void
197kmod_udpstat_inc(int statnum)
198{
199
200 (*((u_long *)&V_udpstat + statnum))++;
201}
202
203int
204udp_newudpcb(struct inpcb *inp)
205{
206 struct udpcb *up;
207
208 up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
209 if (up == NULL)
210 return (ENOBUFS);
211 inp->inp_ppcb = up;
212 return (0);
213}
214
215void
216udp_discardcb(struct udpcb *up)
217{
218
219 uma_zfree(V_udpcb_zone, up);
220}
221
222#ifdef VIMAGE
223void
224udp_destroy(void)
225{
226
227 in_pcbinfo_destroy(&V_udbinfo);
228 uma_zdestroy(V_udpcb_zone);
229}
230#endif
231
232/*
233 * Subroutine of udp_input(), which appends the provided mbuf chain to the
234 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
235 * contains the source address. If the socket ends up being an IPv6 socket,
236 * udp_append() will convert to a sockaddr_in6 before passing the address
237 * into the socket code.
238 */
239static void
240udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
241 struct sockaddr_in *udp_in)
242{
243 struct sockaddr *append_sa;
244 struct socket *so;
245 struct mbuf *opts = 0;
246#ifdef INET6
247 struct sockaddr_in6 udp_in6;
248#endif
249#ifdef IPSEC
250#ifdef IPSEC_NAT_T
251#ifdef INET
252 struct udpcb *up;
253#endif
254#endif
255#endif
256
257 INP_RLOCK_ASSERT(inp);
258
259#ifdef IPSEC
260 /* Check AH/ESP integrity. */
261 if (ipsec4_in_reject(n, inp)) {
262 m_freem(n);
263 V_ipsec4stat.in_polvio++;
264 return;
265 }
266#ifdef IPSEC_NAT_T
267#ifdef INET
268 up = intoudpcb(inp);
269 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
270 if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
271 n = udp4_espdecap(inp, n, off);
272 if (n == NULL) /* Consumed. */
273 return;
274 }
275#endif /* INET */
276#endif /* IPSEC_NAT_T */
277#endif /* IPSEC */
278#ifdef MAC
279 if (mac_inpcb_check_deliver(inp, n) != 0) {
280 m_freem(n);
281 return;
282 }
283#endif
284 if (inp->inp_flags & INP_CONTROLOPTS ||
285 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
286#ifdef INET6
287 if (inp->inp_vflag & INP_IPV6)
288 (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
289 else
290#endif
291 ip_savecontrol(inp, &opts, ip, n);
292 }
293#ifdef INET6
294 if (inp->inp_vflag & INP_IPV6) {
295 bzero(&udp_in6, sizeof(udp_in6));
296 udp_in6.sin6_len = sizeof(udp_in6);
297 udp_in6.sin6_family = AF_INET6;
298 in6_sin_2_v4mapsin6(udp_in, &udp_in6);
299 append_sa = (struct sockaddr *)&udp_in6;
300 } else
301#endif
302 append_sa = (struct sockaddr *)udp_in;
303 m_adj(n, off);
304
305 so = inp->inp_socket;
306 SOCKBUF_LOCK(&so->so_rcv);
307 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
308 SOCKBUF_UNLOCK(&so->so_rcv);
309 m_freem(n);
310 if (opts)
311 m_freem(opts);
312 UDPSTAT_INC(udps_fullsock);
313 } else
314 sorwakeup_locked(so);
315}
316
317void
318udp_input(struct mbuf *m, int off)
319{
320 int iphlen = off;
321 struct ip *ip;
322 struct udphdr *uh;
323 struct ifnet *ifp;
324 struct inpcb *inp;
325 struct udpcb *up;
326 int len;
327 struct ip save_ip;
328 struct sockaddr_in udp_in;
329#ifdef IPFIREWALL_FORWARD
330 struct m_tag *fwd_tag;
331#endif
332
333 ifp = m->m_pkthdr.rcvif;
334 UDPSTAT_INC(udps_ipackets);
335
336 /*
337 * Strip IP options, if any; should skip this, make available to
338 * user, and use on returned packets, but we don't yet have a way to
339 * check the checksum with options still present.
340 */
341 if (iphlen > sizeof (struct ip)) {
342 ip_stripoptions(m, (struct mbuf *)0);
343 iphlen = sizeof(struct ip);
344 }
345
346 /*
347 * Get IP and UDP header together in first mbuf.
348 */
349 ip = mtod(m, struct ip *);
350 if (m->m_len < iphlen + sizeof(struct udphdr)) {
351 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
352 UDPSTAT_INC(udps_hdrops);
353 return;
354 }
355 ip = mtod(m, struct ip *);
356 }
357 uh = (struct udphdr *)((caddr_t)ip + iphlen);
358
359 /*
360 * Destination port of 0 is illegal, based on RFC768.
361 */
362 if (uh->uh_dport == 0)
363 goto badunlocked;
364
365 /*
366 * Construct sockaddr format source address. Stuff source address
367 * and datagram in user buffer.
368 */
369 bzero(&udp_in, sizeof(udp_in));
370 udp_in.sin_len = sizeof(udp_in);
371 udp_in.sin_family = AF_INET;
372 udp_in.sin_port = uh->uh_sport;
373 udp_in.sin_addr = ip->ip_src;
374
375 /*
376 * Make mbuf data length reflect UDP length. If not enough data to
377 * reflect UDP length, drop.
378 */
379 len = ntohs((u_short)uh->uh_ulen);
380 if (ip->ip_len != len) {
381 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
382 UDPSTAT_INC(udps_badlen);
383 goto badunlocked;
384 }
385 m_adj(m, len - ip->ip_len);
386 /* ip->ip_len = len; */
387 }
388
389 /*
390 * Save a copy of the IP header in case we want to restore it for
391 * sending an ICMP error message in response.
392 */
393 if (!V_udp_blackhole)
394 save_ip = *ip;
395 else
396 memset(&save_ip, 0, sizeof(save_ip));
397
398 /*
399 * Checksum extended UDP header and data.
400 */
401 if (uh->uh_sum) {
402 u_short uh_sum;
403
404 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
405 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
406 uh_sum = m->m_pkthdr.csum_data;
407 else
408 uh_sum = in_pseudo(ip->ip_src.s_addr,
409 ip->ip_dst.s_addr, htonl((u_short)len +
410 m->m_pkthdr.csum_data + IPPROTO_UDP));
411 uh_sum ^= 0xffff;
412 } else {
413 char b[9];
414
415 bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
416 bzero(((struct ipovly *)ip)->ih_x1, 9);
417 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
418 uh_sum = in_cksum(m, len + sizeof (struct ip));
419 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
420 }
421 if (uh_sum) {
422 UDPSTAT_INC(udps_badsum);
423 m_freem(m);
424 return;
425 }
426 } else
427 UDPSTAT_INC(udps_nosum);
428
429#ifdef IPFIREWALL_FORWARD
430 /*
431 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
432 */
433 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
434 if (fwd_tag != NULL) {
435 struct sockaddr_in *next_hop;
436
437 /*
438 * Do the hack.
439 */
440 next_hop = (struct sockaddr_in *)(fwd_tag + 1);
441 ip->ip_dst = next_hop->sin_addr;
442 uh->uh_dport = ntohs(next_hop->sin_port);
443
444 /*
445 * Remove the tag from the packet. We don't need it anymore.
446 */
447 m_tag_delete(m, fwd_tag);
448 }
449#endif
450
451 INP_INFO_RLOCK(&V_udbinfo);
452 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
453 in_broadcast(ip->ip_dst, ifp)) {
454 struct inpcb *last;
455 struct ip_moptions *imo;
456
457 last = NULL;
458 LIST_FOREACH(inp, &V_udb, inp_list) {
459 if (inp->inp_lport != uh->uh_dport)
460 continue;
461#ifdef INET6
462 if ((inp->inp_vflag & INP_IPV4) == 0)
463 continue;
464#endif
465 if (inp->inp_laddr.s_addr != INADDR_ANY &&
466 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
467 continue;
468 if (inp->inp_faddr.s_addr != INADDR_ANY &&
469 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
470 continue;
471 if (inp->inp_fport != 0 &&
472 inp->inp_fport != uh->uh_sport)
473 continue;
474
475 INP_RLOCK(inp);
476
477 /*
478 * Handle socket delivery policy for any-source
479 * and source-specific multicast. [RFC3678]
480 */
481 imo = inp->inp_moptions;
482 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
483 imo != NULL) {
484 struct sockaddr_in group;
485 int blocked;
486
487 bzero(&group, sizeof(struct sockaddr_in));
488 group.sin_len = sizeof(struct sockaddr_in);
489 group.sin_family = AF_INET;
490 group.sin_addr = ip->ip_dst;
491
492 blocked = imo_multi_filter(imo, ifp,
493 (struct sockaddr *)&group,
494 (struct sockaddr *)&udp_in);
495 if (blocked != MCAST_PASS) {
496 if (blocked == MCAST_NOTGMEMBER)
497 IPSTAT_INC(ips_notmember);
498 if (blocked == MCAST_NOTSMEMBER ||
499 blocked == MCAST_MUTED)
500 UDPSTAT_INC(udps_filtermcast);
501 INP_RUNLOCK(inp);
502 continue;
503 }
504 }
505 if (last != NULL) {
506 struct mbuf *n;
507
508 n = m_copy(m, 0, M_COPYALL);
509 up = intoudpcb(last);
510 if (up->u_tun_func == NULL) {
511 if (n != NULL)
512 udp_append(last,
513 ip, n,
514 iphlen +
515 sizeof(struct udphdr),
516 &udp_in);
517 } else {
518 /*
519 * Engage the tunneling protocol; we
520 * will have to leave the info_lock
521 * up, since we are hunting through
522 * multiple UDPs.
523 */
524
525 (*up->u_tun_func)(n, iphlen, last);
526 }
527 INP_RUNLOCK(last);
528 }
529 last = inp;
530 /*
531 * Don't look for additional matches if this one does
532 * not have either the SO_REUSEPORT or SO_REUSEADDR
533 * socket options set. This heuristic avoids
534 * searching through all pcbs in the common case of a
535 * non-shared port. It assumes that an application
536 * will never clear these options after setting them.
537 */
538 if ((last->inp_socket->so_options &
539 (SO_REUSEPORT|SO_REUSEADDR)) == 0)
540 break;
541 }
542
543 if (last == NULL) {
544 /*
545 * No matching pcb found; discard datagram. (No need
546 * to send an ICMP Port Unreachable for a broadcast
547 * or multicast datagram.)
548 */
549 UDPSTAT_INC(udps_noportbcast);
550 goto badheadlocked;
551 }
552 up = intoudpcb(last);
553 if (up->u_tun_func == NULL) {
554 udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
555 &udp_in);
556 } else {
557 /*
558 * Engage the tunneling protocol.
559 */
560 (*up->u_tun_func)(m, iphlen, last);
561 }
562 INP_RUNLOCK(last);
563 INP_INFO_RUNLOCK(&V_udbinfo);
564 return;
565 }
566
567 /*
568 * Locate pcb for datagram.
569 */
570 inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport,
571 ip->ip_dst, uh->uh_dport, 1, ifp);
572 if (inp == NULL) {
573 if (udp_log_in_vain) {
574 char buf[4*sizeof "123"];
575
576 strcpy(buf, inet_ntoa(ip->ip_dst));
577 log(LOG_INFO,
578 "Connection attempt to UDP %s:%d from %s:%d\n",
579 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
580 ntohs(uh->uh_sport));
581 }
582 UDPSTAT_INC(udps_noport);
583 if (m->m_flags & (M_BCAST | M_MCAST)) {
584 UDPSTAT_INC(udps_noportbcast);
585 goto badheadlocked;
586 }
587 if (V_udp_blackhole)
588 goto badheadlocked;
589 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
590 goto badheadlocked;
591 *ip = save_ip;
592 ip->ip_len += iphlen;
593 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
594 INP_INFO_RUNLOCK(&V_udbinfo);
595 return;
596 }
597
598 /*
599 * Check the minimum TTL for socket.
600 */
601 INP_RLOCK(inp);
602 INP_INFO_RUNLOCK(&V_udbinfo);
603 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
604 INP_RUNLOCK(inp);
605 goto badunlocked;
606 }
607 up = intoudpcb(inp);
608 if (up->u_tun_func == NULL) {
609 udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in);
610 } else {
611 /*
612 * Engage the tunneling protocol.
613 */
614
615 (*up->u_tun_func)(m, iphlen, inp);
616 }
617 INP_RUNLOCK(inp);
618 return;
619
620badheadlocked:
621 if (inp)
622 INP_RUNLOCK(inp);
623 INP_INFO_RUNLOCK(&V_udbinfo);
624badunlocked:
625 m_freem(m);
626}
627
628/*
629 * Notify a udp user of an asynchronous error; just wake up so that they can
630 * collect error status.
631 */
632struct inpcb *
633udp_notify(struct inpcb *inp, int errno)
634{
635
636 /*
637 * While udp_ctlinput() always calls udp_notify() with a read lock
638 * when invoking it directly, in_pcbnotifyall() currently uses write
639 * locks due to sharing code with TCP. For now, accept either a read
640 * or a write lock, but a read lock is sufficient.
641 */
642 INP_LOCK_ASSERT(inp);
643
644 inp->inp_socket->so_error = errno;
645 sorwakeup(inp->inp_socket);
646 sowwakeup(inp->inp_socket);
647 return (inp);
648}
649
650void
651udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
652{
653 struct ip *ip = vip;
654 struct udphdr *uh;
655 struct in_addr faddr;
656 struct inpcb *inp;
657
658 faddr = ((struct sockaddr_in *)sa)->sin_addr;
659 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
660 return;
661
662 /*
663 * Redirects don't need to be handled up here.
664 */
665 if (PRC_IS_REDIRECT(cmd))
666 return;
667
668 /*
669 * Hostdead is ugly because it goes linearly through all PCBs.
670 *
671 * XXX: We never get this from ICMP, otherwise it makes an excellent
672 * DoS attack on machines with many connections.
673 */
674 if (cmd == PRC_HOSTDEAD)
675 ip = NULL;
676 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
677 return;
678 if (ip != NULL) {
679 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
680 INP_INFO_RLOCK(&V_udbinfo);
681 inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport,
682 ip->ip_src, uh->uh_sport, 0, NULL);
683 if (inp != NULL) {
684 INP_RLOCK(inp);
685 if (inp->inp_socket != NULL) {
686 udp_notify(inp, inetctlerrmap[cmd]);
687 }
688 INP_RUNLOCK(inp);
689 }
690 INP_INFO_RUNLOCK(&V_udbinfo);
691 } else
692 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
693 udp_notify);
694}
695
696static int
697udp_pcblist(SYSCTL_HANDLER_ARGS)
698{
699 int error, i, n;
700 struct inpcb *inp, **inp_list;
701 inp_gen_t gencnt;
702 struct xinpgen xig;
703
704 /*
705 * The process of preparing the PCB list is too time-consuming and
706 * resource-intensive to repeat twice on every request.
707 */
708 if (req->oldptr == 0) {
709 n = V_udbinfo.ipi_count;
710 n += imax(n / 8, 10);
711 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
712 return (0);
713 }
714
715 if (req->newptr != 0)
716 return (EPERM);
717
718 /*
719 * OK, now we're committed to doing something.
720 */
721 INP_INFO_RLOCK(&V_udbinfo);
722 gencnt = V_udbinfo.ipi_gencnt;
723 n = V_udbinfo.ipi_count;
724 INP_INFO_RUNLOCK(&V_udbinfo);
725
726 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
727 + n * sizeof(struct xinpcb));
728 if (error != 0)
729 return (error);
730
731 xig.xig_len = sizeof xig;
732 xig.xig_count = n;
733 xig.xig_gen = gencnt;
734 xig.xig_sogen = so_gencnt;
735 error = SYSCTL_OUT(req, &xig, sizeof xig);
736 if (error)
737 return (error);
738
739 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
740 if (inp_list == 0)
741 return (ENOMEM);
742
743 INP_INFO_RLOCK(&V_udbinfo);
744 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
745 inp = LIST_NEXT(inp, inp_list)) {
746 INP_WLOCK(inp);
747 if (inp->inp_gencnt <= gencnt &&
748 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
749 in_pcbref(inp);
750 inp_list[i++] = inp;
751 }
752 INP_WUNLOCK(inp);
753 }
754 INP_INFO_RUNLOCK(&V_udbinfo);
755 n = i;
756
757 error = 0;
758 for (i = 0; i < n; i++) {
759 inp = inp_list[i];
760 INP_RLOCK(inp);
761 if (inp->inp_gencnt <= gencnt) {
762 struct xinpcb xi;
763
764 bzero(&xi, sizeof(xi));
765 xi.xi_len = sizeof xi;
766 /* XXX should avoid extra copy */
767 bcopy(inp, &xi.xi_inp, sizeof *inp);
768 if (inp->inp_socket)
769 sotoxsocket(inp->inp_socket, &xi.xi_socket);
770 xi.xi_inp.inp_gencnt = inp->inp_gencnt;
771 INP_RUNLOCK(inp);
772 error = SYSCTL_OUT(req, &xi, sizeof xi);
773 } else
774 INP_RUNLOCK(inp);
775 }
776 INP_INFO_WLOCK(&V_udbinfo);
777 for (i = 0; i < n; i++) {
778 inp = inp_list[i];
779 INP_WLOCK(inp);
780 if (!in_pcbrele(inp))
781 INP_WUNLOCK(inp);
782 }
783 INP_INFO_WUNLOCK(&V_udbinfo);
784
785 if (!error) {
786 /*
787 * Give the user an updated idea of our state. If the
788 * generation differs from what we told her before, she knows
789 * that something happened while we were processing this
790 * request, and it might be necessary to retry.
791 */
792 INP_INFO_RLOCK(&V_udbinfo);
793 xig.xig_gen = V_udbinfo.ipi_gencnt;
794 xig.xig_sogen = so_gencnt;
795 xig.xig_count = V_udbinfo.ipi_count;
796 INP_INFO_RUNLOCK(&V_udbinfo);
797 error = SYSCTL_OUT(req, &xig, sizeof xig);
798 }
799 free(inp_list, M_TEMP);
800 return (error);
801}
802
803SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
804 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
805
806static int
807udp_getcred(SYSCTL_HANDLER_ARGS)
808{
809 struct xucred xuc;
810 struct sockaddr_in addrs[2];
811 struct inpcb *inp;
812 int error;
813
814 error = priv_check(req->td, PRIV_NETINET_GETCRED);
815 if (error)
816 return (error);
817 error = SYSCTL_IN(req, addrs, sizeof(addrs));
818 if (error)
819 return (error);
820 INP_INFO_RLOCK(&V_udbinfo);
821 inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
822 addrs[0].sin_addr, addrs[0].sin_port, 1, NULL);
823 if (inp != NULL) {
824 INP_RLOCK(inp);
825 INP_INFO_RUNLOCK(&V_udbinfo);
826 if (inp->inp_socket == NULL)
827 error = ENOENT;
828 if (error == 0)
829 error = cr_canseeinpcb(req->td->td_ucred, inp);
830 if (error == 0)
831 cru2x(inp->inp_cred, &xuc);
832 INP_RUNLOCK(inp);
833 } else {
834 INP_INFO_RUNLOCK(&V_udbinfo);
835 error = ENOENT;
836 }
837 if (error == 0)
838 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
839 return (error);
840}
841
842SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
843 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
844 udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
845
846int
847udp_ctloutput(struct socket *so, struct sockopt *sopt)
848{
849 int error = 0, optval;
850 struct inpcb *inp;
851#ifdef IPSEC_NAT_T
852 struct udpcb *up;
853#endif
854
855 inp = sotoinpcb(so);
856 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
857 INP_WLOCK(inp);
858 if (sopt->sopt_level != IPPROTO_UDP) {
859#ifdef INET6
860 if (INP_CHECK_SOCKAF(so, AF_INET6)) {
861 INP_WUNLOCK(inp);
862 error = ip6_ctloutput(so, sopt);
863 } else {
864#endif
865 INP_WUNLOCK(inp);
866 error = ip_ctloutput(so, sopt);
867#ifdef INET6
868 }
869#endif
870 return (error);
871 }
872
873 switch (sopt->sopt_dir) {
874 case SOPT_SET:
875 switch (sopt->sopt_name) {
876 case UDP_ENCAP:
877 INP_WUNLOCK(inp);
878 error = sooptcopyin(sopt, &optval, sizeof optval,
879 sizeof optval);
880 if (error)
881 break;
882 inp = sotoinpcb(so);
883 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
884 INP_WLOCK(inp);
885#ifdef IPSEC_NAT_T
886 up = intoudpcb(inp);
887 KASSERT(up != NULL, ("%s: up == NULL", __func__));
888#endif
889 switch (optval) {
890 case 0:
891 /* Clear all UDP encap. */
892#ifdef IPSEC_NAT_T
893 up->u_flags &= ~UF_ESPINUDP_ALL;
894#endif
895 break;
896#ifdef IPSEC_NAT_T
897 case UDP_ENCAP_ESPINUDP:
898 case UDP_ENCAP_ESPINUDP_NON_IKE:
899 up->u_flags &= ~UF_ESPINUDP_ALL;
900 if (optval == UDP_ENCAP_ESPINUDP)
901 up->u_flags |= UF_ESPINUDP;
902 else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
903 up->u_flags |= UF_ESPINUDP_NON_IKE;
904 break;
905#endif
906 default:
907 error = EINVAL;
908 break;
909 }
910 INP_WUNLOCK(inp);
911 break;
912 default:
913 INP_WUNLOCK(inp);
914 error = ENOPROTOOPT;
915 break;
916 }
917 break;
918 case SOPT_GET:
919 switch (sopt->sopt_name) {
920#ifdef IPSEC_NAT_T
921 case UDP_ENCAP:
922 up = intoudpcb(inp);
923 KASSERT(up != NULL, ("%s: up == NULL", __func__));
924 optval = up->u_flags & UF_ESPINUDP_ALL;
925 INP_WUNLOCK(inp);
926 error = sooptcopyout(sopt, &optval, sizeof optval);
927 break;
928#endif
929 default:
930 INP_WUNLOCK(inp);
931 error = ENOPROTOOPT;
932 break;
933 }
934 break;
935 }
936 return (error);
937}
938
939static int
940udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
941 struct mbuf *control, struct thread *td)
942{
943 struct udpiphdr *ui;
944 int len = m->m_pkthdr.len;
945 struct in_addr faddr, laddr;
946 struct cmsghdr *cm;
947 struct sockaddr_in *sin, src;
948 int error = 0;
949 int ipflags;
950 u_short fport, lport;
951 int unlock_udbinfo;
952
953 /*
954 * udp_output() may need to temporarily bind or connect the current
955 * inpcb. As such, we don't know up front whether we will need the
956 * pcbinfo lock or not. Do any work to decide what is needed up
957 * front before acquiring any locks.
958 */
959 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
960 if (control)
961 m_freem(control);
962 m_freem(m);
963 return (EMSGSIZE);
964 }
965
966 src.sin_family = 0;
967 if (control != NULL) {
968 /*
969 * XXX: Currently, we assume all the optional information is
970 * stored in a single mbuf.
971 */
972 if (control->m_next) {
973 m_freem(control);
974 m_freem(m);
975 return (EINVAL);
976 }
977 for (; control->m_len > 0;
978 control->m_data += CMSG_ALIGN(cm->cmsg_len),
979 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
980 cm = mtod(control, struct cmsghdr *);
981 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
982 || cm->cmsg_len > control->m_len) {
983 error = EINVAL;
984 break;
985 }
986 if (cm->cmsg_level != IPPROTO_IP)
987 continue;
988
989 switch (cm->cmsg_type) {
990 case IP_SENDSRCADDR:
991 if (cm->cmsg_len !=
992 CMSG_LEN(sizeof(struct in_addr))) {
993 error = EINVAL;
994 break;
995 }
996 bzero(&src, sizeof(src));
997 src.sin_family = AF_INET;
998 src.sin_len = sizeof(src);
999 src.sin_port = inp->inp_lport;
1000 src.sin_addr =
1001 *(struct in_addr *)CMSG_DATA(cm);
1002 break;
1003
1004 default:
1005 error = ENOPROTOOPT;
1006 break;
1007 }
1008 if (error)
1009 break;
1010 }
1011 m_freem(control);
1012 }
1013 if (error) {
1014 m_freem(m);
1015 return (error);
1016 }
1017
1018 /*
1019 * Depending on whether or not the application has bound or connected
1020 * the socket, we may have to do varying levels of work. The optimal
1021 * case is for a connected UDP socket, as a global lock isn't
1022 * required at all.
1023 *
1024 * In order to decide which we need, we require stability of the
1025 * inpcb binding, which we ensure by acquiring a read lock on the
1026 * inpcb. This doesn't strictly follow the lock order, so we play
1027 * the trylock and retry game; note that we may end up with more
1028 * conservative locks than required the second time around, so later
1029 * assertions have to accept that. Further analysis of the number of
1030 * misses under contention is required.
1031 */
1032 sin = (struct sockaddr_in *)addr;
1033 INP_RLOCK(inp);
1034 if (sin != NULL &&
1035 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1036 INP_RUNLOCK(inp);
1037 INP_INFO_WLOCK(&V_udbinfo);
1038 INP_WLOCK(inp);
1039 unlock_udbinfo = 2;
1040 } else if ((sin != NULL && (
1041 (sin->sin_addr.s_addr == INADDR_ANY) ||
1042 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1043 (inp->inp_laddr.s_addr == INADDR_ANY) ||
1044 (inp->inp_lport == 0))) ||
1045 (src.sin_family == AF_INET)) {
1046 if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) {
1047 INP_RUNLOCK(inp);
1048 INP_INFO_RLOCK(&V_udbinfo);
1049 INP_RLOCK(inp);
1050 }
1051 unlock_udbinfo = 1;
1052 } else
1053 unlock_udbinfo = 0;
1054
1055 /*
1056 * If the IP_SENDSRCADDR control message was specified, override the
1057 * source address for this datagram. Its use is invalidated if the
1058 * address thus specified is incomplete or clobbers other inpcbs.
1059 */
1060 laddr = inp->inp_laddr;
1061 lport = inp->inp_lport;
1062 if (src.sin_family == AF_INET) {
1063 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1064 if ((lport == 0) ||
1065 (laddr.s_addr == INADDR_ANY &&
1066 src.sin_addr.s_addr == INADDR_ANY)) {
1067 error = EINVAL;
1068 goto release;
1069 }
1070 error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1071 &laddr.s_addr, &lport, td->td_ucred);
1072 if (error)
1073 goto release;
1074 }
1075
1076 /*
1077 * If a UDP socket has been connected, then a local address/port will
1078 * have been selected and bound.
1079 *
1080 * If a UDP socket has not been connected to, then an explicit
1081 * destination address must be used, in which case a local
1082 * address/port may not have been selected and bound.
1083 */
1084 if (sin != NULL) {
1085 INP_LOCK_ASSERT(inp);
1086 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1087 error = EISCONN;
1088 goto release;
1089 }
1090
1091 /*
1092 * Jail may rewrite the destination address, so let it do
1093 * that before we use it.
1094 */
1095 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1096 if (error)
1097 goto release;
1098
1099 /*
1100 * If a local address or port hasn't yet been selected, or if
1101 * the destination address needs to be rewritten due to using
1102 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1103 * to do the heavy lifting. Once a port is selected, we
1104 * commit the binding back to the socket; we also commit the
1105 * binding of the address if in jail.
1106 *
1107 * If we already have a valid binding and we're not
1108 * requesting a destination address rewrite, use a fast path.
1109 */
1110 if (inp->inp_laddr.s_addr == INADDR_ANY ||
1111 inp->inp_lport == 0 ||
1112 sin->sin_addr.s_addr == INADDR_ANY ||
1113 sin->sin_addr.s_addr == INADDR_BROADCAST) {
1114 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1115 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1116 &lport, &faddr.s_addr, &fport, NULL,
1117 td->td_ucred);
1118 if (error)
1119 goto release;
1120
1121 /*
1122 * XXXRW: Why not commit the port if the address is
1123 * !INADDR_ANY?
1124 */
1125 /* Commit the local port if newly assigned. */
1126 if (inp->inp_laddr.s_addr == INADDR_ANY &&
1127 inp->inp_lport == 0) {
1128 INP_INFO_WLOCK_ASSERT(&V_udbinfo);
1129 INP_WLOCK_ASSERT(inp);
1130 /*
1131 * Remember addr if jailed, to prevent
1132 * rebinding.
1133 */
1134 if (prison_flag(td->td_ucred, PR_IP4))
1135 inp->inp_laddr = laddr;
1136 inp->inp_lport = lport;
1137 if (in_pcbinshash(inp) != 0) {
1138 inp->inp_lport = 0;
1139 error = EAGAIN;
1140 goto release;
1141 }
1142 inp->inp_flags |= INP_ANONPORT;
1143 }
1144 } else {
1145 faddr = sin->sin_addr;
1146 fport = sin->sin_port;
1147 }
1148 } else {
1149 INP_LOCK_ASSERT(inp);
1150 faddr = inp->inp_faddr;
1151 fport = inp->inp_fport;
1152 if (faddr.s_addr == INADDR_ANY) {
1153 error = ENOTCONN;
1154 goto release;
1155 }
1156 }
1157
1158 /*
1159 * Calculate data length and get a mbuf for UDP, IP, and possible
1160 * link-layer headers. Immediately slide the data pointer forward again
1161 * since we won't use that space at this layer.
1162 */
1163 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT);
1164 if (m == NULL) {
1165 error = ENOBUFS;
1166 goto release;
1167 }
1168 m->m_data += max_linkhdr;
1169 m->m_len -= max_linkhdr;
1170 m->m_pkthdr.len -= max_linkhdr;
1171
1172 /*
1173 * Fill in mbuf with extended UDP header and addresses and length put
1174 * into network format.
1175 */
1176 ui = mtod(m, struct udpiphdr *);
1177 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1178 ui->ui_pr = IPPROTO_UDP;
1179 ui->ui_src = laddr;
1180 ui->ui_dst = faddr;
1181 ui->ui_sport = lport;
1182 ui->ui_dport = fport;
1183 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1184
1185 /*
1186 * Set the Don't Fragment bit in the IP header.
1187 */
1188 if (inp->inp_flags & INP_DONTFRAG) {
1189 struct ip *ip;
1190
1191 ip = (struct ip *)&ui->ui_i;
1192 ip->ip_off |= IP_DF;
1193 }
1194
1195 ipflags = 0;
1196 if (inp->inp_socket->so_options & SO_DONTROUTE)
1197 ipflags |= IP_ROUTETOIF;
1198 if (inp->inp_socket->so_options & SO_BROADCAST)
1199 ipflags |= IP_ALLOWBROADCAST;
1200 if (inp->inp_flags & INP_ONESBCAST)
1201 ipflags |= IP_SENDONES;
1202
1203#ifdef MAC
1204 mac_inpcb_create_mbuf(inp, m);
1205#endif
1206
1207 /*
1208 * Set up checksum and output datagram.
1209 */
1210 if (udp_cksum) {
1211 if (inp->inp_flags & INP_ONESBCAST)
1212 faddr.s_addr = INADDR_BROADCAST;
1213 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1214 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1215 m->m_pkthdr.csum_flags = CSUM_UDP;
1216 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1217 } else
1218 ui->ui_sum = 0;
1219 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1220 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1221 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1222 UDPSTAT_INC(udps_opackets);
1223
1224 if (unlock_udbinfo == 2)
1225 INP_INFO_WUNLOCK(&V_udbinfo);
1226 else if (unlock_udbinfo == 1)
1227 INP_INFO_RUNLOCK(&V_udbinfo);
1228 error = ip_output(m, inp->inp_options, NULL, ipflags,
1229 inp->inp_moptions, inp);
1230 if (unlock_udbinfo == 2)
1231 INP_WUNLOCK(inp);
1232 else
1233 INP_RUNLOCK(inp);
1234 return (error);
1235
1236release:
1237 if (unlock_udbinfo == 2) {
1238 INP_WUNLOCK(inp);
1239 INP_INFO_WUNLOCK(&V_udbinfo);
1240 } else if (unlock_udbinfo == 1) {
1241 INP_RUNLOCK(inp);
1242 INP_INFO_RUNLOCK(&V_udbinfo);
1243 } else
1244 INP_RUNLOCK(inp);
1245 m_freem(m);
1246 return (error);
1247}
1248
1249
1250#if defined(IPSEC) && defined(IPSEC_NAT_T)
1251#ifdef INET
1252/*
1253 * Potentially decap ESP in UDP frame. Check for an ESP header
1254 * and optional marker; if present, strip the UDP header and
1255 * push the result through IPSec.
1256 *
1257 * Returns mbuf to be processed (potentially re-allocated) or
1258 * NULL if consumed and/or processed.
1259 */
1260static struct mbuf *
1261udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1262{
1263 size_t minlen, payload, skip, iphlen;
1264 caddr_t data;
1265 struct udpcb *up;
1266 struct m_tag *tag;
1267 struct udphdr *udphdr;
1268 struct ip *ip;
1269
1270 INP_RLOCK_ASSERT(inp);
1271
1272 /*
1273 * Pull up data so the longest case is contiguous:
1274 * IP/UDP hdr + non ESP marker + ESP hdr.
1275 */
1276 minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1277 if (minlen > m->m_pkthdr.len)
1278 minlen = m->m_pkthdr.len;
1279 if ((m = m_pullup(m, minlen)) == NULL) {
1280 V_ipsec4stat.in_inval++;
1281 return (NULL); /* Bypass caller processing. */
1282 }
1283 data = mtod(m, caddr_t); /* Points to ip header. */
1284 payload = m->m_len - off; /* Size of payload. */
1285
1286 if (payload == 1 && data[off] == '\xff')
1287 return (m); /* NB: keepalive packet, no decap. */
1288
1289 up = intoudpcb(inp);
1290 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1291 KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1292 ("u_flags 0x%x", up->u_flags));
1293
1294 /*
1295 * Check that the payload is large enough to hold an
1296 * ESP header and compute the amount of data to remove.
1297 *
1298 * NB: the caller has already done a pullup for us.
1299 * XXX can we assume alignment and eliminate bcopys?
1300 */
1301 if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1302 /*
1303 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1304 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1305 * possible AH mode non-IKE marker+non-ESP marker
1306 * from draft-ietf-ipsec-udp-encaps-00.txt.
1307 */
1308 uint64_t marker;
1309
1310 if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1311 return (m); /* NB: no decap. */
1312 bcopy(data + off, &marker, sizeof(uint64_t));
1313 if (marker != 0) /* Non-IKE marker. */
1314 return (m); /* NB: no decap. */
1315 skip = sizeof(uint64_t) + sizeof(struct udphdr);
1316 } else {
1317 uint32_t spi;
1318
1319 if (payload <= sizeof(struct esp)) {
1320 V_ipsec4stat.in_inval++;
1321 m_freem(m);
1322 return (NULL); /* Discard. */
1323 }
1324 bcopy(data + off, &spi, sizeof(uint32_t));
1325 if (spi == 0) /* Non-ESP marker. */
1326 return (m); /* NB: no decap. */
1327 skip = sizeof(struct udphdr);
1328 }
1329
1330 /*
1331 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
1332 * the UDP ports. This is required if we want to select
1333 * the right SPD for multiple hosts behind same NAT.
1334 *
1335 * NB: ports are maintained in network byte order everywhere
1336 * in the NAT-T code.
1337 */
1338 tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1339 2 * sizeof(uint16_t), M_NOWAIT);
1340 if (tag == NULL) {
1341 V_ipsec4stat.in_nomem++;
1342 m_freem(m);
1343 return (NULL); /* Discard. */
1344 }
1345 iphlen = off - sizeof(struct udphdr);
1346 udphdr = (struct udphdr *)(data + iphlen);
1347 ((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1348 ((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1349 m_tag_prepend(m, tag);
1350
1351 /*
1352 * Remove the UDP header (and possibly the non ESP marker)
1353 * IP header length is iphlen
1354 * Before:
1355 * <--- off --->
1356 * +----+------+-----+
1357 * | IP | UDP | ESP |
1358 * +----+------+-----+
1359 * <-skip->
1360 * After:
1361 * +----+-----+
1362 * | IP | ESP |
1363 * +----+-----+
1364 * <-skip->
1365 */
1366 ovbcopy(data, data + skip, iphlen);
1367 m_adj(m, skip);
1368
1369 ip = mtod(m, struct ip *);
1370 ip->ip_len -= skip;
1371 ip->ip_p = IPPROTO_ESP;
1372
1373 /*
1374 * We cannot yet update the cksums so clear any
1375 * h/w cksum flags as they are no longer valid.
1376 */
1377 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1378 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1379
1380 (void) ipsec4_common_input(m, iphlen, ip->ip_p);
1381 return (NULL); /* NB: consumed, bypass processing. */
1382}
1383#endif /* INET */
1384#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1385
1386static void
1387udp_abort(struct socket *so)
1388{
1389 struct inpcb *inp;
1390
1391 inp = sotoinpcb(so);
1392 KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1393 INP_INFO_WLOCK(&V_udbinfo);
1394 INP_WLOCK(inp);
1395 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1396 in_pcbdisconnect(inp);
1397 inp->inp_laddr.s_addr = INADDR_ANY;
1398 soisdisconnected(so);
1399 }
1400 INP_WUNLOCK(inp);
1401 INP_INFO_WUNLOCK(&V_udbinfo);
1402}
1403
1404static int
1405udp_attach(struct socket *so, int proto, struct thread *td)
1406{
1407 struct inpcb *inp;
1408 int error;
1409
1410 inp = sotoinpcb(so);
1411 KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1412 error = soreserve(so, udp_sendspace, udp_recvspace);
1413 if (error)
1414 return (error);
1415 INP_INFO_WLOCK(&V_udbinfo);
1416 error = in_pcballoc(so, &V_udbinfo);
1417 if (error) {
1418 INP_INFO_WUNLOCK(&V_udbinfo);
1419 return (error);
1420 }
1421
1422 inp = sotoinpcb(so);
1423 inp->inp_vflag |= INP_IPV4;
1424 inp->inp_ip_ttl = V_ip_defttl;
1425
1426 error = udp_newudpcb(inp);
1427 if (error) {
1428 in_pcbdetach(inp);
1429 in_pcbfree(inp);
1430 INP_INFO_WUNLOCK(&V_udbinfo);
1431 return (error);
1432 }
1433
1434 INP_WUNLOCK(inp);
1435 INP_INFO_WUNLOCK(&V_udbinfo);
1436 return (0);
1437}
1438
1439int
1440udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1441{
1442 struct inpcb *inp;
1443 struct udpcb *up;
1444
1445 KASSERT(so->so_type == SOCK_DGRAM,
1446 ("udp_set_kernel_tunneling: !dgram"));
1447 inp = sotoinpcb(so);
1448 KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1449 INP_WLOCK(inp);
1450 up = intoudpcb(inp);
1451 if (up->u_tun_func != NULL) {
1452 INP_WUNLOCK(inp);
1453 return (EBUSY);
1454 }
1455 up->u_tun_func = f;
1456 INP_WUNLOCK(inp);
1457 return (0);
1458}
1459
1460static int
1461udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1462{
1463 struct inpcb *inp;
1464 int error;
1465
1466 inp = sotoinpcb(so);
1467 KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1468 INP_INFO_WLOCK(&V_udbinfo);
1469 INP_WLOCK(inp);
1470 error = in_pcbbind(inp, nam, td->td_ucred);
1471 INP_WUNLOCK(inp);
1472 INP_INFO_WUNLOCK(&V_udbinfo);
1473 return (error);
1474}
1475
1476static void
1477udp_close(struct socket *so)
1478{
1479 struct inpcb *inp;
1480
1481 inp = sotoinpcb(so);
1482 KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1483 INP_INFO_WLOCK(&V_udbinfo);
1484 INP_WLOCK(inp);
1485 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1486 in_pcbdisconnect(inp);
1487 inp->inp_laddr.s_addr = INADDR_ANY;
1488 soisdisconnected(so);
1489 }
1490 INP_WUNLOCK(inp);
1491 INP_INFO_WUNLOCK(&V_udbinfo);
1492}
1493
1494static int
1495udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1496{
1497 struct inpcb *inp;
1498 int error;
1499 struct sockaddr_in *sin;
1500
1501 inp = sotoinpcb(so);
1502 KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1503 INP_INFO_WLOCK(&V_udbinfo);
1504 INP_WLOCK(inp);
1505 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1506 INP_WUNLOCK(inp);
1507 INP_INFO_WUNLOCK(&V_udbinfo);
1508 return (EISCONN);
1509 }
1510 sin = (struct sockaddr_in *)nam;
1511 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1512 if (error != 0) {
1513 INP_WUNLOCK(inp);
1514 INP_INFO_WUNLOCK(&V_udbinfo);
1515 return (error);
1516 }
1517 error = in_pcbconnect(inp, nam, td->td_ucred);
1518 if (error == 0)
1519 soisconnected(so);
1520 INP_WUNLOCK(inp);
1521 INP_INFO_WUNLOCK(&V_udbinfo);
1522 return (error);
1523}
1524
1525static void
1526udp_detach(struct socket *so)
1527{
1528 struct inpcb *inp;
1529 struct udpcb *up;
1530
1531 inp = sotoinpcb(so);
1532 KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1533 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1534 ("udp_detach: not disconnected"));
1535 INP_INFO_WLOCK(&V_udbinfo);
1536 INP_WLOCK(inp);
1537 up = intoudpcb(inp);
1538 KASSERT(up != NULL, ("%s: up == NULL", __func__));
1539 inp->inp_ppcb = NULL;
1540 in_pcbdetach(inp);
1541 in_pcbfree(inp);
1542 INP_INFO_WUNLOCK(&V_udbinfo);
1543 udp_discardcb(up);
1544}
1545
1546static int
1547udp_disconnect(struct socket *so)
1548{
1549 struct inpcb *inp;
1550
1551 inp = sotoinpcb(so);
1552 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1553 INP_INFO_WLOCK(&V_udbinfo);
1554 INP_WLOCK(inp);
1555 if (inp->inp_faddr.s_addr == INADDR_ANY) {
1556 INP_WUNLOCK(inp);
1557 INP_INFO_WUNLOCK(&V_udbinfo);
1558 return (ENOTCONN);
1559 }
1560
1561 in_pcbdisconnect(inp);
1562 inp->inp_laddr.s_addr = INADDR_ANY;
1563 SOCK_LOCK(so);
1564 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1565 SOCK_UNLOCK(so);
1566 INP_WUNLOCK(inp);
1567 INP_INFO_WUNLOCK(&V_udbinfo);
1568 return (0);
1569}
1570
1571static int
1572udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1573 struct mbuf *control, struct thread *td)
1574{
1575 struct inpcb *inp;
1576
1577 inp = sotoinpcb(so);
1578 KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1579 return (udp_output(inp, m, addr, control, td));
1580}
1581
1582int
1583udp_shutdown(struct socket *so)
1584{
1585 struct inpcb *inp;
1586
1587 inp = sotoinpcb(so);
1588 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1589 INP_WLOCK(inp);
1590 socantsendmore(so);
1591 INP_WUNLOCK(inp);
1592 return (0);
1593}
1594
1595struct pr_usrreqs udp_usrreqs = {
1596 .pru_abort = udp_abort,
1597 .pru_attach = udp_attach,
1598 .pru_bind = udp_bind,
1599 .pru_connect = udp_connect,
1600 .pru_control = in_control,
1601 .pru_detach = udp_detach,
1602 .pru_disconnect = udp_disconnect,
1603 .pru_peeraddr = in_getpeeraddr,
1604 .pru_send = udp_send,
1605 .pru_soreceive = soreceive_dgram,
1606 .pru_sosend = sosend_dgram,
1607 .pru_shutdown = udp_shutdown,
1608 .pru_sockaddr = in_getsockaddr,
1609 .pru_sosetlabel = in_pcbsosetlabel,
1610 .pru_close = udp_close,
1611};
135#define V_udpcb_zone VNET(udpcb_zone)
136
137#ifndef UDBHASHSIZE
138#define UDBHASHSIZE 128
139#endif
140
141VNET_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
142SYSCTL_VNET_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RW,
143 &VNET_NAME(udpstat), udpstat,
144 "UDP statistics (struct udpstat, netinet/udp_var.h)");
145
146static void udp_detach(struct socket *so);
147static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
148 struct mbuf *, struct thread *);
149#ifdef IPSEC
150#ifdef IPSEC_NAT_T
151#define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
152#ifdef INET
153static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
154#endif
155#endif /* IPSEC_NAT_T */
156#endif /* IPSEC */
157
158static void
159udp_zone_change(void *tag)
160{
161
162 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
163 uma_zone_set_max(V_udpcb_zone, maxsockets);
164}
165
166static int
167udp_inpcb_init(void *mem, int size, int flags)
168{
169 struct inpcb *inp;
170
171 inp = mem;
172 INP_LOCK_INIT(inp, "inp", "udpinp");
173 return (0);
174}
175
176void
177udp_init(void)
178{
179
180 in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
181 "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE);
182 V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
183 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
184 uma_zone_set_max(V_udpcb_zone, maxsockets);
185 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
186 EVENTHANDLER_PRI_ANY);
187}
188
189/*
190 * Kernel module interface for updating udpstat. The argument is an index
191 * into udpstat treated as an array of u_long. While this encodes the
192 * general layout of udpstat into the caller, it doesn't encode its location,
193 * so that future changes to add, for example, per-CPU stats support won't
194 * cause binary compatibility problems for kernel modules.
195 */
196void
197kmod_udpstat_inc(int statnum)
198{
199
200 (*((u_long *)&V_udpstat + statnum))++;
201}
202
203int
204udp_newudpcb(struct inpcb *inp)
205{
206 struct udpcb *up;
207
208 up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
209 if (up == NULL)
210 return (ENOBUFS);
211 inp->inp_ppcb = up;
212 return (0);
213}
214
215void
216udp_discardcb(struct udpcb *up)
217{
218
219 uma_zfree(V_udpcb_zone, up);
220}
221
222#ifdef VIMAGE
223void
224udp_destroy(void)
225{
226
227 in_pcbinfo_destroy(&V_udbinfo);
228 uma_zdestroy(V_udpcb_zone);
229}
230#endif
231
232/*
233 * Subroutine of udp_input(), which appends the provided mbuf chain to the
234 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
235 * contains the source address. If the socket ends up being an IPv6 socket,
236 * udp_append() will convert to a sockaddr_in6 before passing the address
237 * into the socket code.
238 */
239static void
240udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
241 struct sockaddr_in *udp_in)
242{
243 struct sockaddr *append_sa;
244 struct socket *so;
245 struct mbuf *opts = 0;
246#ifdef INET6
247 struct sockaddr_in6 udp_in6;
248#endif
249#ifdef IPSEC
250#ifdef IPSEC_NAT_T
251#ifdef INET
252 struct udpcb *up;
253#endif
254#endif
255#endif
256
257 INP_RLOCK_ASSERT(inp);
258
259#ifdef IPSEC
260 /* Check AH/ESP integrity. */
261 if (ipsec4_in_reject(n, inp)) {
262 m_freem(n);
263 V_ipsec4stat.in_polvio++;
264 return;
265 }
266#ifdef IPSEC_NAT_T
267#ifdef INET
268 up = intoudpcb(inp);
269 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
270 if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
271 n = udp4_espdecap(inp, n, off);
272 if (n == NULL) /* Consumed. */
273 return;
274 }
275#endif /* INET */
276#endif /* IPSEC_NAT_T */
277#endif /* IPSEC */
278#ifdef MAC
279 if (mac_inpcb_check_deliver(inp, n) != 0) {
280 m_freem(n);
281 return;
282 }
283#endif
284 if (inp->inp_flags & INP_CONTROLOPTS ||
285 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
286#ifdef INET6
287 if (inp->inp_vflag & INP_IPV6)
288 (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
289 else
290#endif
291 ip_savecontrol(inp, &opts, ip, n);
292 }
293#ifdef INET6
294 if (inp->inp_vflag & INP_IPV6) {
295 bzero(&udp_in6, sizeof(udp_in6));
296 udp_in6.sin6_len = sizeof(udp_in6);
297 udp_in6.sin6_family = AF_INET6;
298 in6_sin_2_v4mapsin6(udp_in, &udp_in6);
299 append_sa = (struct sockaddr *)&udp_in6;
300 } else
301#endif
302 append_sa = (struct sockaddr *)udp_in;
303 m_adj(n, off);
304
305 so = inp->inp_socket;
306 SOCKBUF_LOCK(&so->so_rcv);
307 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
308 SOCKBUF_UNLOCK(&so->so_rcv);
309 m_freem(n);
310 if (opts)
311 m_freem(opts);
312 UDPSTAT_INC(udps_fullsock);
313 } else
314 sorwakeup_locked(so);
315}
316
317void
318udp_input(struct mbuf *m, int off)
319{
320 int iphlen = off;
321 struct ip *ip;
322 struct udphdr *uh;
323 struct ifnet *ifp;
324 struct inpcb *inp;
325 struct udpcb *up;
326 int len;
327 struct ip save_ip;
328 struct sockaddr_in udp_in;
329#ifdef IPFIREWALL_FORWARD
330 struct m_tag *fwd_tag;
331#endif
332
333 ifp = m->m_pkthdr.rcvif;
334 UDPSTAT_INC(udps_ipackets);
335
336 /*
337 * Strip IP options, if any; should skip this, make available to
338 * user, and use on returned packets, but we don't yet have a way to
339 * check the checksum with options still present.
340 */
341 if (iphlen > sizeof (struct ip)) {
342 ip_stripoptions(m, (struct mbuf *)0);
343 iphlen = sizeof(struct ip);
344 }
345
346 /*
347 * Get IP and UDP header together in first mbuf.
348 */
349 ip = mtod(m, struct ip *);
350 if (m->m_len < iphlen + sizeof(struct udphdr)) {
351 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
352 UDPSTAT_INC(udps_hdrops);
353 return;
354 }
355 ip = mtod(m, struct ip *);
356 }
357 uh = (struct udphdr *)((caddr_t)ip + iphlen);
358
359 /*
360 * Destination port of 0 is illegal, based on RFC768.
361 */
362 if (uh->uh_dport == 0)
363 goto badunlocked;
364
365 /*
366 * Construct sockaddr format source address. Stuff source address
367 * and datagram in user buffer.
368 */
369 bzero(&udp_in, sizeof(udp_in));
370 udp_in.sin_len = sizeof(udp_in);
371 udp_in.sin_family = AF_INET;
372 udp_in.sin_port = uh->uh_sport;
373 udp_in.sin_addr = ip->ip_src;
374
375 /*
376 * Make mbuf data length reflect UDP length. If not enough data to
377 * reflect UDP length, drop.
378 */
379 len = ntohs((u_short)uh->uh_ulen);
380 if (ip->ip_len != len) {
381 if (len > ip->ip_len || len < sizeof(struct udphdr)) {
382 UDPSTAT_INC(udps_badlen);
383 goto badunlocked;
384 }
385 m_adj(m, len - ip->ip_len);
386 /* ip->ip_len = len; */
387 }
388
389 /*
390 * Save a copy of the IP header in case we want restore it for
391 * sending an ICMP error message in response.
392 */
393 if (!V_udp_blackhole)
394 save_ip = *ip;
395 else
396 memset(&save_ip, 0, sizeof(save_ip));
397
398 /*
399 * Checksum extended UDP header and data.
400 */
401 if (uh->uh_sum) {
402 u_short uh_sum;
403
404 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
405 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
406 uh_sum = m->m_pkthdr.csum_data;
407 else
408 uh_sum = in_pseudo(ip->ip_src.s_addr,
409 ip->ip_dst.s_addr, htonl((u_short)len +
410 m->m_pkthdr.csum_data + IPPROTO_UDP));
411 uh_sum ^= 0xffff;
412 } else {
413 char b[9];
414
415 bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
416 bzero(((struct ipovly *)ip)->ih_x1, 9);
417 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
418 uh_sum = in_cksum(m, len + sizeof (struct ip));
419 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
420 }
421 if (uh_sum) {
422 UDPSTAT_INC(udps_badsum);
423 m_freem(m);
424 return;
425 }
426 } else
427 UDPSTAT_INC(udps_nosum);
428
429#ifdef IPFIREWALL_FORWARD
430 /*
431 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
432 */
433 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
434 if (fwd_tag != NULL) {
435 struct sockaddr_in *next_hop;
436
437 /*
438 * Do the hack.
439 */
440 next_hop = (struct sockaddr_in *)(fwd_tag + 1);
441 ip->ip_dst = next_hop->sin_addr;
442 uh->uh_dport = ntohs(next_hop->sin_port);
443
444 /*
445 * Remove the tag from the packet. We don't need it anymore.
446 */
447 m_tag_delete(m, fwd_tag);
448 }
449#endif
450
451 INP_INFO_RLOCK(&V_udbinfo);
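	/*
	 * Broadcast and multicast datagrams may have to be delivered to
	 * more than one matching socket, so walk the full PCB list rather
	 * than relying on a single hash lookup.
	 */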
452 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
453 in_broadcast(ip->ip_dst, ifp)) {
454 struct inpcb *last;
455 struct ip_moptions *imo;
456
457 last = NULL;
458 LIST_FOREACH(inp, &V_udb, inp_list) {
459 if (inp->inp_lport != uh->uh_dport)
460 continue;
461#ifdef INET6
462 if ((inp->inp_vflag & INP_IPV4) == 0)
463 continue;
464#endif
465 if (inp->inp_laddr.s_addr != INADDR_ANY &&
466 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
467 continue;
468 if (inp->inp_faddr.s_addr != INADDR_ANY &&
469 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
470 continue;
471 if (inp->inp_fport != 0 &&
472 inp->inp_fport != uh->uh_sport)
473 continue;
474
475 INP_RLOCK(inp);
476
477 /*
478 * Handle socket delivery policy for any-source
479 * and source-specific multicast. [RFC3678]
480 */
481 imo = inp->inp_moptions;
482 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
483 imo != NULL) {
484 struct sockaddr_in group;
485 int blocked;
486
487 bzero(&group, sizeof(struct sockaddr_in));
488 group.sin_len = sizeof(struct sockaddr_in);
489 group.sin_family = AF_INET;
490 group.sin_addr = ip->ip_dst;
491
492 blocked = imo_multi_filter(imo, ifp,
493 (struct sockaddr *)&group,
494 (struct sockaddr *)&udp_in);
495 if (blocked != MCAST_PASS) {
496 if (blocked == MCAST_NOTGMEMBER)
497 IPSTAT_INC(ips_notmember);
498 if (blocked == MCAST_NOTSMEMBER ||
499 blocked == MCAST_MUTED)
500 UDPSTAT_INC(udps_filtermcast);
501 INP_RUNLOCK(inp);
502 continue;
503 }
504 }
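			/*
			 * If an earlier socket matched, hand it a copy of
			 * the datagram now; the final match receives the
			 * original mbuf once the loop ends.
			 */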
505 if (last != NULL) {
506 struct mbuf *n;
507
508 n = m_copy(m, 0, M_COPYALL);
509 up = intoudpcb(last);
510 if (up->u_tun_func == NULL) {
511 if (n != NULL)
512 udp_append(last,
513 ip, n,
514 iphlen +
515 sizeof(struct udphdr),
516 &udp_in);
517 } else {
518 /*
519				 * Engage the tunneling protocol; we
520				 * have to leave the udbinfo lock
521				 * held, since we are hunting through
522				 * multiple UDP PCBs.
523 */
524
525 (*up->u_tun_func)(n, iphlen, last);
526 }
527 INP_RUNLOCK(last);
528 }
529 last = inp;
530 /*
531 * Don't look for additional matches if this one does
532 * not have either the SO_REUSEPORT or SO_REUSEADDR
533 * socket options set. This heuristic avoids
534 * searching through all pcbs in the common case of a
535 * non-shared port. It assumes that an application
536 * will never clear these options after setting them.
537 */
538 if ((last->inp_socket->so_options &
539 (SO_REUSEPORT|SO_REUSEADDR)) == 0)
540 break;
541 }
542
543 if (last == NULL) {
544 /*
545 * No matching pcb found; discard datagram. (No need
546 * to send an ICMP Port Unreachable for a broadcast
547			 * or multicast datagram.)
548 */
549 UDPSTAT_INC(udps_noportbcast);
550 goto badheadlocked;
551 }
552 up = intoudpcb(last);
553 if (up->u_tun_func == NULL) {
554 udp_append(last, ip, m, iphlen + sizeof(struct udphdr),
555 &udp_in);
556 } else {
557 /*
558 * Engage the tunneling protocol.
559 */
560 (*up->u_tun_func)(m, iphlen, last);
561 }
562 INP_RUNLOCK(last);
563 INP_INFO_RUNLOCK(&V_udbinfo);
564 return;
565 }
566
567 /*
568 * Locate pcb for datagram.
569 */
570 inp = in_pcblookup_hash(&V_udbinfo, ip->ip_src, uh->uh_sport,
571 ip->ip_dst, uh->uh_dport, 1, ifp);
572 if (inp == NULL) {
573 if (udp_log_in_vain) {
574 char buf[4*sizeof "123"];
575
576 strcpy(buf, inet_ntoa(ip->ip_dst));
577 log(LOG_INFO,
578 "Connection attempt to UDP %s:%d from %s:%d\n",
579 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
580 ntohs(uh->uh_sport));
581 }
582 UDPSTAT_INC(udps_noport);
583 if (m->m_flags & (M_BCAST | M_MCAST)) {
584 UDPSTAT_INC(udps_noportbcast);
585 goto badheadlocked;
586 }
587 if (V_udp_blackhole)
588 goto badheadlocked;
589 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
590 goto badheadlocked;
591 *ip = save_ip;
592 ip->ip_len += iphlen;
593 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
594 INP_INFO_RUNLOCK(&V_udbinfo);
595 return;
596 }
597
598 /*
599 * Check the minimum TTL for socket.
600 */
601 INP_RLOCK(inp);
602 INP_INFO_RUNLOCK(&V_udbinfo);
603 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
604 INP_RUNLOCK(inp);
605 goto badunlocked;
606 }
607 up = intoudpcb(inp);
608 if (up->u_tun_func == NULL) {
609 udp_append(inp, ip, m, iphlen + sizeof(struct udphdr), &udp_in);
610 } else {
611 /*
612 * Engage the tunneling protocol.
613 */
614
615 (*up->u_tun_func)(m, iphlen, inp);
616 }
617 INP_RUNLOCK(inp);
618 return;
619
620badheadlocked:
621 if (inp)
622 INP_RUNLOCK(inp);
623 INP_INFO_RUNLOCK(&V_udbinfo);
624badunlocked:
625 m_freem(m);
626}
627
628/*
629 * Notify a udp user of an asynchronous error; just wake up so that they can
630 * collect error status.
631 */
632struct inpcb *
633udp_notify(struct inpcb *inp, int errno)
634{
635
636 /*
637 * While udp_ctlinput() always calls udp_notify() with a read lock
638 * when invoking it directly, in_pcbnotifyall() currently uses write
639 * locks due to sharing code with TCP. For now, accept either a read
640 * or a write lock, but a read lock is sufficient.
641 */
642 INP_LOCK_ASSERT(inp);
643
644 inp->inp_socket->so_error = errno;
645 sorwakeup(inp->inp_socket);
646 sowwakeup(inp->inp_socket);
647 return (inp);
648}
649
650void
651udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
652{
653 struct ip *ip = vip;
654 struct udphdr *uh;
655 struct in_addr faddr;
656 struct inpcb *inp;
657
658 faddr = ((struct sockaddr_in *)sa)->sin_addr;
659 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
660 return;
661
662 /*
663 * Redirects don't need to be handled up here.
664 */
665 if (PRC_IS_REDIRECT(cmd))
666 return;
667
668 /*
669 * Hostdead is ugly because it goes linearly through all PCBs.
670 *
671	 * XXX: We never get this from ICMP; otherwise it would make an
672	 * excellent DoS attack on machines with many connections.
673 */
674 if (cmd == PRC_HOSTDEAD)
675 ip = NULL;
676 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
677 return;
678 if (ip != NULL) {
679 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
680 INP_INFO_RLOCK(&V_udbinfo);
681 inp = in_pcblookup_hash(&V_udbinfo, faddr, uh->uh_dport,
682 ip->ip_src, uh->uh_sport, 0, NULL);
683 if (inp != NULL) {
684 INP_RLOCK(inp);
685 if (inp->inp_socket != NULL) {
686 udp_notify(inp, inetctlerrmap[cmd]);
687 }
688 INP_RUNLOCK(inp);
689 }
690 INP_INFO_RUNLOCK(&V_udbinfo);
691 } else
692 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
693 udp_notify);
694}
695
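/*
 * Export the list of UDP inpcbs to userland via sysctl, as a sequence of
 * struct xinpcb records bracketed by struct xinpgen records so the caller
 * can detect changes made while the list was being copied out.
 */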
696static int
697udp_pcblist(SYSCTL_HANDLER_ARGS)
698{
699 int error, i, n;
700 struct inpcb *inp, **inp_list;
701 inp_gen_t gencnt;
702 struct xinpgen xig;
703
704 /*
705 * The process of preparing the PCB list is too time-consuming and
706 * resource-intensive to repeat twice on every request.
707 */
708 if (req->oldptr == 0) {
709 n = V_udbinfo.ipi_count;
710 n += imax(n / 8, 10);
711 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
712 return (0);
713 }
714
715 if (req->newptr != 0)
716 return (EPERM);
717
718 /*
719 * OK, now we're committed to doing something.
720 */
721 INP_INFO_RLOCK(&V_udbinfo);
722 gencnt = V_udbinfo.ipi_gencnt;
723 n = V_udbinfo.ipi_count;
724 INP_INFO_RUNLOCK(&V_udbinfo);
725
726 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
727 + n * sizeof(struct xinpcb));
728 if (error != 0)
729 return (error);
730
731 xig.xig_len = sizeof xig;
732 xig.xig_count = n;
733 xig.xig_gen = gencnt;
734 xig.xig_sogen = so_gencnt;
735 error = SYSCTL_OUT(req, &xig, sizeof xig);
736 if (error)
737 return (error);
738
739 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
740 if (inp_list == 0)
741 return (ENOMEM);
742
743 INP_INFO_RLOCK(&V_udbinfo);
744 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
745 inp = LIST_NEXT(inp, inp_list)) {
746 INP_WLOCK(inp);
747 if (inp->inp_gencnt <= gencnt &&
748 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
749 in_pcbref(inp);
750 inp_list[i++] = inp;
751 }
752 INP_WUNLOCK(inp);
753 }
754 INP_INFO_RUNLOCK(&V_udbinfo);
755 n = i;
756
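	/*
	 * The references acquired above keep each inpcb valid while it is
	 * exported below without the global pcbinfo lock held; they are
	 * dropped again in the final pass.
	 */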
757 error = 0;
758 for (i = 0; i < n; i++) {
759 inp = inp_list[i];
760 INP_RLOCK(inp);
761 if (inp->inp_gencnt <= gencnt) {
762 struct xinpcb xi;
763
764 bzero(&xi, sizeof(xi));
765 xi.xi_len = sizeof xi;
766 /* XXX should avoid extra copy */
767 bcopy(inp, &xi.xi_inp, sizeof *inp);
768 if (inp->inp_socket)
769 sotoxsocket(inp->inp_socket, &xi.xi_socket);
770 xi.xi_inp.inp_gencnt = inp->inp_gencnt;
771 INP_RUNLOCK(inp);
772 error = SYSCTL_OUT(req, &xi, sizeof xi);
773 } else
774 INP_RUNLOCK(inp);
775 }
776 INP_INFO_WLOCK(&V_udbinfo);
777 for (i = 0; i < n; i++) {
778 inp = inp_list[i];
779 INP_WLOCK(inp);
780 if (!in_pcbrele(inp))
781 INP_WUNLOCK(inp);
782 }
783 INP_INFO_WUNLOCK(&V_udbinfo);
784
785 if (!error) {
786 /*
787 * Give the user an updated idea of our state. If the
788 * generation differs from what we told her before, she knows
789 * that something happened while we were processing this
790 * request, and it might be necessary to retry.
791 */
792 INP_INFO_RLOCK(&V_udbinfo);
793 xig.xig_gen = V_udbinfo.ipi_gencnt;
794 xig.xig_sogen = so_gencnt;
795 xig.xig_count = V_udbinfo.ipi_count;
796 INP_INFO_RUNLOCK(&V_udbinfo);
797 error = SYSCTL_OUT(req, &xig, sizeof xig);
798 }
799 free(inp_list, M_TEMP);
800 return (error);
801}
802
803SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
804 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
805
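/*
 * Report the credentials of the UDP socket matching the address pair
 * supplied through sysctl, subject to a privilege check on the caller.
 */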
806static int
807udp_getcred(SYSCTL_HANDLER_ARGS)
808{
809 struct xucred xuc;
810 struct sockaddr_in addrs[2];
811 struct inpcb *inp;
812 int error;
813
814 error = priv_check(req->td, PRIV_NETINET_GETCRED);
815 if (error)
816 return (error);
817 error = SYSCTL_IN(req, addrs, sizeof(addrs));
818 if (error)
819 return (error);
820 INP_INFO_RLOCK(&V_udbinfo);
821 inp = in_pcblookup_hash(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
822 addrs[0].sin_addr, addrs[0].sin_port, 1, NULL);
823 if (inp != NULL) {
824 INP_RLOCK(inp);
825 INP_INFO_RUNLOCK(&V_udbinfo);
826 if (inp->inp_socket == NULL)
827 error = ENOENT;
828 if (error == 0)
829 error = cr_canseeinpcb(req->td->td_ucred, inp);
830 if (error == 0)
831 cru2x(inp->inp_cred, &xuc);
832 INP_RUNLOCK(inp);
833 } else {
834 INP_INFO_RUNLOCK(&V_udbinfo);
835 error = ENOENT;
836 }
837 if (error == 0)
838 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
839 return (error);
840}
841
842SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
843 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
844 udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
845
846int
847udp_ctloutput(struct socket *so, struct sockopt *sopt)
848{
849 int error = 0, optval;
850 struct inpcb *inp;
851#ifdef IPSEC_NAT_T
852 struct udpcb *up;
853#endif
854
855 inp = sotoinpcb(so);
856 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
857 INP_WLOCK(inp);
858 if (sopt->sopt_level != IPPROTO_UDP) {
859#ifdef INET6
860 if (INP_CHECK_SOCKAF(so, AF_INET6)) {
861 INP_WUNLOCK(inp);
862 error = ip6_ctloutput(so, sopt);
863 } else {
864#endif
865 INP_WUNLOCK(inp);
866 error = ip_ctloutput(so, sopt);
867#ifdef INET6
868 }
869#endif
870 return (error);
871 }
872
873 switch (sopt->sopt_dir) {
874 case SOPT_SET:
875 switch (sopt->sopt_name) {
876 case UDP_ENCAP:
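			/*
			 * Drop the inpcb lock across sooptcopyin(), which
			 * may sleep while copying from user space, and
			 * re-acquire it before acting on the option value.
			 */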
877 INP_WUNLOCK(inp);
878 error = sooptcopyin(sopt, &optval, sizeof optval,
879 sizeof optval);
880 if (error)
881 break;
882 inp = sotoinpcb(so);
883 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
884 INP_WLOCK(inp);
885#ifdef IPSEC_NAT_T
886 up = intoudpcb(inp);
887 KASSERT(up != NULL, ("%s: up == NULL", __func__));
888#endif
889 switch (optval) {
890 case 0:
891 /* Clear all UDP encap. */
892#ifdef IPSEC_NAT_T
893 up->u_flags &= ~UF_ESPINUDP_ALL;
894#endif
895 break;
896#ifdef IPSEC_NAT_T
897 case UDP_ENCAP_ESPINUDP:
898 case UDP_ENCAP_ESPINUDP_NON_IKE:
899 up->u_flags &= ~UF_ESPINUDP_ALL;
900 if (optval == UDP_ENCAP_ESPINUDP)
901 up->u_flags |= UF_ESPINUDP;
902 else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
903 up->u_flags |= UF_ESPINUDP_NON_IKE;
904 break;
905#endif
906 default:
907 error = EINVAL;
908 break;
909 }
910 INP_WUNLOCK(inp);
911 break;
912 default:
913 INP_WUNLOCK(inp);
914 error = ENOPROTOOPT;
915 break;
916 }
917 break;
918 case SOPT_GET:
919 switch (sopt->sopt_name) {
920#ifdef IPSEC_NAT_T
921 case UDP_ENCAP:
922 up = intoudpcb(inp);
923 KASSERT(up != NULL, ("%s: up == NULL", __func__));
924 optval = up->u_flags & UF_ESPINUDP_ALL;
925 INP_WUNLOCK(inp);
926 error = sooptcopyout(sopt, &optval, sizeof optval);
927 break;
928#endif
929 default:
930 INP_WUNLOCK(inp);
931 error = ENOPROTOOPT;
932 break;
933 }
934 break;
935 }
936 return (error);
937}
938
939static int
940udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
941 struct mbuf *control, struct thread *td)
942{
943 struct udpiphdr *ui;
944 int len = m->m_pkthdr.len;
945 struct in_addr faddr, laddr;
946 struct cmsghdr *cm;
947 struct sockaddr_in *sin, src;
948 int error = 0;
949 int ipflags;
950 u_short fport, lport;
951 int unlock_udbinfo;
952
953 /*
954 * udp_output() may need to temporarily bind or connect the current
955 * inpcb. As such, we don't know up front whether we will need the
956 * pcbinfo lock or not. Do any work to decide what is needed up
957 * front before acquiring any locks.
958 */
959 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
960 if (control)
961 m_freem(control);
962 m_freem(m);
963 return (EMSGSIZE);
964 }
965
966 src.sin_family = 0;
967 if (control != NULL) {
968 /*
969 * XXX: Currently, we assume all the optional information is
970 * stored in a single mbuf.
971 */
972 if (control->m_next) {
973 m_freem(control);
974 m_freem(m);
975 return (EINVAL);
976 }
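		/*
		 * Walk the control mbuf one cmsghdr at a time; only
		 * IP_SENDSRCADDR at level IPPROTO_IP is handled here, and
		 * any other IPPROTO_IP option yields ENOPROTOOPT.
		 */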
977 for (; control->m_len > 0;
978 control->m_data += CMSG_ALIGN(cm->cmsg_len),
979 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
980 cm = mtod(control, struct cmsghdr *);
981 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
982 || cm->cmsg_len > control->m_len) {
983 error = EINVAL;
984 break;
985 }
986 if (cm->cmsg_level != IPPROTO_IP)
987 continue;
988
989 switch (cm->cmsg_type) {
990 case IP_SENDSRCADDR:
991 if (cm->cmsg_len !=
992 CMSG_LEN(sizeof(struct in_addr))) {
993 error = EINVAL;
994 break;
995 }
996 bzero(&src, sizeof(src));
997 src.sin_family = AF_INET;
998 src.sin_len = sizeof(src);
999 src.sin_port = inp->inp_lport;
1000 src.sin_addr =
1001 *(struct in_addr *)CMSG_DATA(cm);
1002 break;
1003
1004 default:
1005 error = ENOPROTOOPT;
1006 break;
1007 }
1008 if (error)
1009 break;
1010 }
1011 m_freem(control);
1012 }
1013 if (error) {
1014 m_freem(m);
1015 return (error);
1016 }
1017
1018 /*
1019 * Depending on whether or not the application has bound or connected
1020 * the socket, we may have to do varying levels of work. The optimal
1021 * case is for a connected UDP socket, as a global lock isn't
1022 * required at all.
1023 *
1024 * In order to decide which we need, we require stability of the
1025 * inpcb binding, which we ensure by acquiring a read lock on the
1026 * inpcb. This doesn't strictly follow the lock order, so we play
1027 * the trylock and retry game; note that we may end up with more
1028 * conservative locks than required the second time around, so later
1029 * assertions have to accept that. Further analysis of the number of
1030 * misses under contention is required.
1031 */
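	/*
	 * unlock_udbinfo records which locks were taken: 2 means the
	 * pcbinfo write lock and the inpcb write lock are held, 1 means
	 * the pcbinfo read lock and the inpcb read lock, and 0 means only
	 * the inpcb read lock.
	 */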
1032 sin = (struct sockaddr_in *)addr;
1033 INP_RLOCK(inp);
1034 if (sin != NULL &&
1035 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1036 INP_RUNLOCK(inp);
1037 INP_INFO_WLOCK(&V_udbinfo);
1038 INP_WLOCK(inp);
1039 unlock_udbinfo = 2;
1040 } else if ((sin != NULL && (
1041 (sin->sin_addr.s_addr == INADDR_ANY) ||
1042 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1043 (inp->inp_laddr.s_addr == INADDR_ANY) ||
1044 (inp->inp_lport == 0))) ||
1045 (src.sin_family == AF_INET)) {
1046 if (!INP_INFO_TRY_RLOCK(&V_udbinfo)) {
1047 INP_RUNLOCK(inp);
1048 INP_INFO_RLOCK(&V_udbinfo);
1049 INP_RLOCK(inp);
1050 }
1051 unlock_udbinfo = 1;
1052 } else
1053 unlock_udbinfo = 0;
1054
1055 /*
1056 * If the IP_SENDSRCADDR control message was specified, override the
1057 * source address for this datagram. Its use is invalidated if the
1058 * address thus specified is incomplete or clobbers other inpcbs.
1059 */
1060 laddr = inp->inp_laddr;
1061 lport = inp->inp_lport;
1062 if (src.sin_family == AF_INET) {
1063 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1064 if ((lport == 0) ||
1065 (laddr.s_addr == INADDR_ANY &&
1066 src.sin_addr.s_addr == INADDR_ANY)) {
1067 error = EINVAL;
1068 goto release;
1069 }
1070 error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1071 &laddr.s_addr, &lport, td->td_ucred);
1072 if (error)
1073 goto release;
1074 }
1075
1076 /*
1077 * If a UDP socket has been connected, then a local address/port will
1078 * have been selected and bound.
1079 *
1080 * If a UDP socket has not been connected to, then an explicit
1081 * destination address must be used, in which case a local
1082 * address/port may not have been selected and bound.
1083 */
1084 if (sin != NULL) {
1085 INP_LOCK_ASSERT(inp);
1086 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1087 error = EISCONN;
1088 goto release;
1089 }
1090
1091 /*
1092 * Jail may rewrite the destination address, so let it do
1093 * that before we use it.
1094 */
1095 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1096 if (error)
1097 goto release;
1098
1099 /*
1100 * If a local address or port hasn't yet been selected, or if
1101 * the destination address needs to be rewritten due to using
1102 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1103 * to do the heavy lifting. Once a port is selected, we
1104 * commit the binding back to the socket; we also commit the
1105 * binding of the address if in jail.
1106 *
1107 * If we already have a valid binding and we're not
1108 * requesting a destination address rewrite, use a fast path.
1109 */
1110 if (inp->inp_laddr.s_addr == INADDR_ANY ||
1111 inp->inp_lport == 0 ||
1112 sin->sin_addr.s_addr == INADDR_ANY ||
1113 sin->sin_addr.s_addr == INADDR_BROADCAST) {
1114 INP_INFO_LOCK_ASSERT(&V_udbinfo);
1115 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1116 &lport, &faddr.s_addr, &fport, NULL,
1117 td->td_ucred);
1118 if (error)
1119 goto release;
1120
1121 /*
1122 * XXXRW: Why not commit the port if the address is
1123 * !INADDR_ANY?
1124 */
1125 /* Commit the local port if newly assigned. */
1126 if (inp->inp_laddr.s_addr == INADDR_ANY &&
1127 inp->inp_lport == 0) {
1128 INP_INFO_WLOCK_ASSERT(&V_udbinfo);
1129 INP_WLOCK_ASSERT(inp);
1130 /*
1131 * Remember addr if jailed, to prevent
1132 * rebinding.
1133 */
1134 if (prison_flag(td->td_ucred, PR_IP4))
1135 inp->inp_laddr = laddr;
1136 inp->inp_lport = lport;
1137 if (in_pcbinshash(inp) != 0) {
1138 inp->inp_lport = 0;
1139 error = EAGAIN;
1140 goto release;
1141 }
1142 inp->inp_flags |= INP_ANONPORT;
1143 }
1144 } else {
1145 faddr = sin->sin_addr;
1146 fport = sin->sin_port;
1147 }
1148 } else {
1149 INP_LOCK_ASSERT(inp);
1150 faddr = inp->inp_faddr;
1151 fport = inp->inp_fport;
1152 if (faddr.s_addr == INADDR_ANY) {
1153 error = ENOTCONN;
1154 goto release;
1155 }
1156 }
1157
1158 /*
1159 * Calculate data length and get a mbuf for UDP, IP, and possible
1160	 * link-layer headers. Immediately slide the data pointer forward
1161	 * again, since we won't use that space at this layer.
1162 */
1163 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_DONTWAIT);
1164 if (m == NULL) {
1165 error = ENOBUFS;
1166 goto release;
1167 }
1168 m->m_data += max_linkhdr;
1169 m->m_len -= max_linkhdr;
1170 m->m_pkthdr.len -= max_linkhdr;
1171
1172 /*
1173	 * Fill in the mbuf with the extended UDP header, addresses, and
1174	 * length, all in network byte order.
1175 */
1176 ui = mtod(m, struct udpiphdr *);
1177 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1178 ui->ui_pr = IPPROTO_UDP;
1179 ui->ui_src = laddr;
1180 ui->ui_dst = faddr;
1181 ui->ui_sport = lport;
1182 ui->ui_dport = fport;
1183 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1184
1185 /*
1186 * Set the Don't Fragment bit in the IP header.
1187 */
1188 if (inp->inp_flags & INP_DONTFRAG) {
1189 struct ip *ip;
1190
1191 ip = (struct ip *)&ui->ui_i;
1192 ip->ip_off |= IP_DF;
1193 }
1194
1195 ipflags = 0;
1196 if (inp->inp_socket->so_options & SO_DONTROUTE)
1197 ipflags |= IP_ROUTETOIF;
1198 if (inp->inp_socket->so_options & SO_BROADCAST)
1199 ipflags |= IP_ALLOWBROADCAST;
1200 if (inp->inp_flags & INP_ONESBCAST)
1201 ipflags |= IP_SENDONES;
1202
1203#ifdef MAC
1204 mac_inpcb_create_mbuf(inp, m);
1205#endif
1206
1207 /*
1208 * Set up checksum and output datagram.
1209 */
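	/*
	 * Only the pseudo-header portion of the checksum is filled in here;
	 * setting CSUM_UDP leaves the rest for the interface, or for
	 * ip_output()'s delayed checksum handling, to complete.
	 */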
1210 if (udp_cksum) {
1211 if (inp->inp_flags & INP_ONESBCAST)
1212 faddr.s_addr = INADDR_BROADCAST;
1213 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1214 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1215 m->m_pkthdr.csum_flags = CSUM_UDP;
1216 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1217 } else
1218 ui->ui_sum = 0;
1219 ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len;
1220 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1221 ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */
1222 UDPSTAT_INC(udps_opackets);
1223
1224 if (unlock_udbinfo == 2)
1225 INP_INFO_WUNLOCK(&V_udbinfo);
1226 else if (unlock_udbinfo == 1)
1227 INP_INFO_RUNLOCK(&V_udbinfo);
1228 error = ip_output(m, inp->inp_options, NULL, ipflags,
1229 inp->inp_moptions, inp);
1230 if (unlock_udbinfo == 2)
1231 INP_WUNLOCK(inp);
1232 else
1233 INP_RUNLOCK(inp);
1234 return (error);
1235
1236release:
1237 if (unlock_udbinfo == 2) {
1238 INP_WUNLOCK(inp);
1239 INP_INFO_WUNLOCK(&V_udbinfo);
1240 } else if (unlock_udbinfo == 1) {
1241 INP_RUNLOCK(inp);
1242 INP_INFO_RUNLOCK(&V_udbinfo);
1243 } else
1244 INP_RUNLOCK(inp);
1245 m_freem(m);
1246 return (error);
1247}
1248
1249
1250#if defined(IPSEC) && defined(IPSEC_NAT_T)
1251#ifdef INET
1252/*
1253 * Potentially decap ESP in UDP frame. Check for an ESP header
1254 * and optional marker; if present, strip the UDP header and
1255 * push the result through IPSec.
1256 *
1257 * Returns mbuf to be processed (potentially re-allocated) or
1258 * NULL if consumed and/or processed.
1259 */
1260static struct mbuf *
1261udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1262{
1263 size_t minlen, payload, skip, iphlen;
1264 caddr_t data;
1265 struct udpcb *up;
1266 struct m_tag *tag;
1267 struct udphdr *udphdr;
1268 struct ip *ip;
1269
1270 INP_RLOCK_ASSERT(inp);
1271
1272 /*
1273 * Pull up data so the longest case is contiguous:
1274 * IP/UDP hdr + non ESP marker + ESP hdr.
1275 */
1276 minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1277 if (minlen > m->m_pkthdr.len)
1278 minlen = m->m_pkthdr.len;
1279 if ((m = m_pullup(m, minlen)) == NULL) {
1280 V_ipsec4stat.in_inval++;
1281 return (NULL); /* Bypass caller processing. */
1282 }
1283 data = mtod(m, caddr_t); /* Points to ip header. */
1284 payload = m->m_len - off; /* Size of payload. */
1285
1286 if (payload == 1 && data[off] == '\xff')
1287 return (m); /* NB: keepalive packet, no decap. */
1288
1289 up = intoudpcb(inp);
1290 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1291 KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1292 ("u_flags 0x%x", up->u_flags));
1293
1294 /*
1295 * Check that the payload is large enough to hold an
1296 * ESP header and compute the amount of data to remove.
1297 *
1298 * NB: the caller has already done a pullup for us.
1299 * XXX can we assume alignment and eliminate bcopys?
1300 */
1301 if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1302 /*
1303 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1304 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1305 * possible AH mode non-IKE marker+non-ESP marker
1306 * from draft-ietf-ipsec-udp-encaps-00.txt.
1307 */
1308 uint64_t marker;
1309
1310 if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1311 return (m); /* NB: no decap. */
1312 bcopy(data + off, &marker, sizeof(uint64_t));
1313 if (marker != 0) /* Non-IKE marker. */
1314 return (m); /* NB: no decap. */
1315 skip = sizeof(uint64_t) + sizeof(struct udphdr);
1316 } else {
1317 uint32_t spi;
1318
1319 if (payload <= sizeof(struct esp)) {
1320 V_ipsec4stat.in_inval++;
1321 m_freem(m);
1322 return (NULL); /* Discard. */
1323 }
1324 bcopy(data + off, &spi, sizeof(uint32_t));
1325 if (spi == 0) /* Non-ESP marker. */
1326 return (m); /* NB: no decap. */
1327 skip = sizeof(struct udphdr);
1328 }
1329
1330 /*
1331	 * Set up a PACKET_TAG_IPSEC_NAT_T_PORTS tag to remember
1332	 * the UDP ports. This is required if we want to select
1333	 * the right SPD for multiple hosts behind the same NAT.
1334 *
1335 * NB: ports are maintained in network byte order everywhere
1336 * in the NAT-T code.
1337 */
1338 tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1339 2 * sizeof(uint16_t), M_NOWAIT);
1340 if (tag == NULL) {
1341 V_ipsec4stat.in_nomem++;
1342 m_freem(m);
1343 return (NULL); /* Discard. */
1344 }
1345 iphlen = off - sizeof(struct udphdr);
1346 udphdr = (struct udphdr *)(data + iphlen);
1347 ((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1348 ((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1349 m_tag_prepend(m, tag);
1350
1351 /*
1352	 * Remove the UDP header (and possibly the non-ESP marker).
1353	 * The IP header length is iphlen.
1354 * Before:
1355 * <--- off --->
1356 * +----+------+-----+
1357 * | IP | UDP | ESP |
1358 * +----+------+-----+
1359 * <-skip->
1360 * After:
1361 * +----+-----+
1362 * | IP | ESP |
1363 * +----+-----+
1364 * <-skip->
1365 */
1366 ovbcopy(data, data + skip, iphlen);
1367 m_adj(m, skip);
1368
1369 ip = mtod(m, struct ip *);
1370 ip->ip_len -= skip;
1371 ip->ip_p = IPPROTO_ESP;
1372
1373 /*
1374	 * We cannot yet update the checksums, so clear any
1375	 * h/w checksum flags as they are no longer valid.
1376 */
1377 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1378 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1379
1380 (void) ipsec4_common_input(m, iphlen, ip->ip_p);
1381 return (NULL); /* NB: consumed, bypass processing. */
1382}
1383#endif /* INET */
1384#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1385
1386static void
1387udp_abort(struct socket *so)
1388{
1389 struct inpcb *inp;
1390
1391 inp = sotoinpcb(so);
1392 KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1393 INP_INFO_WLOCK(&V_udbinfo);
1394 INP_WLOCK(inp);
1395 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1396 in_pcbdisconnect(inp);
1397 inp->inp_laddr.s_addr = INADDR_ANY;
1398 soisdisconnected(so);
1399 }
1400 INP_WUNLOCK(inp);
1401 INP_INFO_WUNLOCK(&V_udbinfo);
1402}
1403
1404static int
1405udp_attach(struct socket *so, int proto, struct thread *td)
1406{
1407 struct inpcb *inp;
1408 int error;
1409
1410 inp = sotoinpcb(so);
1411 KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1412 error = soreserve(so, udp_sendspace, udp_recvspace);
1413 if (error)
1414 return (error);
1415 INP_INFO_WLOCK(&V_udbinfo);
1416 error = in_pcballoc(so, &V_udbinfo);
1417 if (error) {
1418 INP_INFO_WUNLOCK(&V_udbinfo);
1419 return (error);
1420 }
1421
1422 inp = sotoinpcb(so);
1423 inp->inp_vflag |= INP_IPV4;
1424 inp->inp_ip_ttl = V_ip_defttl;
1425
1426 error = udp_newudpcb(inp);
1427 if (error) {
1428 in_pcbdetach(inp);
1429 in_pcbfree(inp);
1430 INP_INFO_WUNLOCK(&V_udbinfo);
1431 return (error);
1432 }
1433
1434 INP_WUNLOCK(inp);
1435 INP_INFO_WUNLOCK(&V_udbinfo);
1436 return (0);
1437}
1438
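/*
 * Install a kernel tunneling callback on a UDP socket so that received
 * datagrams are handed to the callback instead of being appended to the
 * socket buffer; fails with EBUSY if a callback is already installed.
 */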
1439int
1440udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1441{
1442 struct inpcb *inp;
1443 struct udpcb *up;
1444
1445 KASSERT(so->so_type == SOCK_DGRAM,
1446 ("udp_set_kernel_tunneling: !dgram"));
1447 inp = sotoinpcb(so);
1448 KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1449 INP_WLOCK(inp);
1450 up = intoudpcb(inp);
1451 if (up->u_tun_func != NULL) {
1452 INP_WUNLOCK(inp);
1453 return (EBUSY);
1454 }
1455 up->u_tun_func = f;
1456 INP_WUNLOCK(inp);
1457 return (0);
1458}
1459
1460static int
1461udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1462{
1463 struct inpcb *inp;
1464 int error;
1465
1466 inp = sotoinpcb(so);
1467 KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1468 INP_INFO_WLOCK(&V_udbinfo);
1469 INP_WLOCK(inp);
1470 error = in_pcbbind(inp, nam, td->td_ucred);
1471 INP_WUNLOCK(inp);
1472 INP_INFO_WUNLOCK(&V_udbinfo);
1473 return (error);
1474}
1475
1476static void
1477udp_close(struct socket *so)
1478{
1479 struct inpcb *inp;
1480
1481 inp = sotoinpcb(so);
1482 KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1483 INP_INFO_WLOCK(&V_udbinfo);
1484 INP_WLOCK(inp);
1485 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1486 in_pcbdisconnect(inp);
1487 inp->inp_laddr.s_addr = INADDR_ANY;
1488 soisdisconnected(so);
1489 }
1490 INP_WUNLOCK(inp);
1491 INP_INFO_WUNLOCK(&V_udbinfo);
1492}
1493
1494static int
1495udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1496{
1497 struct inpcb *inp;
1498 int error;
1499 struct sockaddr_in *sin;
1500
1501 inp = sotoinpcb(so);
1502 KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1503 INP_INFO_WLOCK(&V_udbinfo);
1504 INP_WLOCK(inp);
1505 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1506 INP_WUNLOCK(inp);
1507 INP_INFO_WUNLOCK(&V_udbinfo);
1508 return (EISCONN);
1509 }
1510 sin = (struct sockaddr_in *)nam;
1511 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1512 if (error != 0) {
1513 INP_WUNLOCK(inp);
1514 INP_INFO_WUNLOCK(&V_udbinfo);
1515 return (error);
1516 }
1517 error = in_pcbconnect(inp, nam, td->td_ucred);
1518 if (error == 0)
1519 soisconnected(so);
1520 INP_WUNLOCK(inp);
1521 INP_INFO_WUNLOCK(&V_udbinfo);
1522 return (error);
1523}
1524
1525static void
1526udp_detach(struct socket *so)
1527{
1528 struct inpcb *inp;
1529 struct udpcb *up;
1530
1531 inp = sotoinpcb(so);
1532 KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1533 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1534 ("udp_detach: not disconnected"));
1535 INP_INFO_WLOCK(&V_udbinfo);
1536 INP_WLOCK(inp);
1537 up = intoudpcb(inp);
1538 KASSERT(up != NULL, ("%s: up == NULL", __func__));
1539 inp->inp_ppcb = NULL;
1540 in_pcbdetach(inp);
1541 in_pcbfree(inp);
1542 INP_INFO_WUNLOCK(&V_udbinfo);
1543 udp_discardcb(up);
1544}
1545
1546static int
1547udp_disconnect(struct socket *so)
1548{
1549 struct inpcb *inp;
1550
1551 inp = sotoinpcb(so);
1552 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1553 INP_INFO_WLOCK(&V_udbinfo);
1554 INP_WLOCK(inp);
1555 if (inp->inp_faddr.s_addr == INADDR_ANY) {
1556 INP_WUNLOCK(inp);
1557 INP_INFO_WUNLOCK(&V_udbinfo);
1558 return (ENOTCONN);
1559 }
1560
1561 in_pcbdisconnect(inp);
1562 inp->inp_laddr.s_addr = INADDR_ANY;
1563 SOCK_LOCK(so);
1564 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1565 SOCK_UNLOCK(so);
1566 INP_WUNLOCK(inp);
1567 INP_INFO_WUNLOCK(&V_udbinfo);
1568 return (0);
1569}
1570
1571static int
1572udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1573 struct mbuf *control, struct thread *td)
1574{
1575 struct inpcb *inp;
1576
1577 inp = sotoinpcb(so);
1578 KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1579 return (udp_output(inp, m, addr, control, td));
1580}
1581
1582int
1583udp_shutdown(struct socket *so)
1584{
1585 struct inpcb *inp;
1586
1587 inp = sotoinpcb(so);
1588 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1589 INP_WLOCK(inp);
1590 socantsendmore(so);
1591 INP_WUNLOCK(inp);
1592 return (0);
1593}
1594
1595struct pr_usrreqs udp_usrreqs = {
1596 .pru_abort = udp_abort,
1597 .pru_attach = udp_attach,
1598 .pru_bind = udp_bind,
1599 .pru_connect = udp_connect,
1600 .pru_control = in_control,
1601 .pru_detach = udp_detach,
1602 .pru_disconnect = udp_disconnect,
1603 .pru_peeraddr = in_getpeeraddr,
1604 .pru_send = udp_send,
1605 .pru_soreceive = soreceive_dgram,
1606 .pru_sosend = sosend_dgram,
1607 .pru_shutdown = udp_shutdown,
1608 .pru_sockaddr = in_getsockaddr,
1609 .pru_sosetlabel = in_pcbsosetlabel,
1610 .pru_close = udp_close,
1611};