udp_usrreq.c: revision 253571 vs. revision 254889
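Summary of the change, as visible in the portion of the diff shown here: r254889 adds DTrace/SDT probe support to the UDP code, namely the "opt_kdtrace.h", <sys/sdt.h> and <netinet/in_kdtrace.h> includes and a UDP_PROBE(receive, ...) call in udp_input() just before the datagram is delivered. The remaining lines are unchanged context listed from both revisions.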
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California.
4 * Copyright (c) 2008 Robert N. M. Watson
5 * Copyright (c) 2010-2011 Juniper Networks, Inc.
6 * All rights reserved.
7 *
8 * Portions of this software were developed by Robert N. M. Watson under
9 * contract to Juniper Networks, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
36 */
37
38#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3 * The Regents of the University of California.
4 * Copyright (c) 2008 Robert N. M. Watson
5 * Copyright (c) 2010-2011 Juniper Networks, Inc.
6 * All rights reserved.
7 *
8 * Portions of this software were developed by Robert N. M. Watson under
9 * contract to Juniper Networks, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: head/sys/netinet/udp_usrreq.c 253571 2013-07-23 14:14:24Z ae $");
39__FBSDID("$FreeBSD: head/sys/netinet/udp_usrreq.c 254889 2013-08-25 21:54:41Z markj $");
40
41#include "opt_ipfw.h"
42#include "opt_inet.h"
43#include "opt_inet6.h"
44#include "opt_ipsec.h"
40
41#include "opt_ipfw.h"
42#include "opt_inet.h"
43#include "opt_inet6.h"
44#include "opt_ipsec.h"
45#include "opt_kdtrace.h"
45
46#include <sys/param.h>
47#include <sys/domain.h>
48#include <sys/eventhandler.h>
49#include <sys/jail.h>
50#include <sys/kernel.h>
51#include <sys/lock.h>
52#include <sys/malloc.h>
53#include <sys/mbuf.h>
54#include <sys/priv.h>
55#include <sys/proc.h>
56#include <sys/protosw.h>
46
47#include <sys/param.h>
48#include <sys/domain.h>
49#include <sys/eventhandler.h>
50#include <sys/jail.h>
51#include <sys/kernel.h>
52#include <sys/lock.h>
53#include <sys/malloc.h>
54#include <sys/mbuf.h>
55#include <sys/priv.h>
56#include <sys/proc.h>
57#include <sys/protosw.h>
58#include <sys/sdt.h>
57#include <sys/signalvar.h>
58#include <sys/socket.h>
59#include <sys/socketvar.h>
60#include <sys/sx.h>
61#include <sys/sysctl.h>
62#include <sys/syslog.h>
63#include <sys/systm.h>
64
65#include <vm/uma.h>
66
67#include <net/if.h>
68#include <net/route.h>
69
70#include <netinet/in.h>
59#include <sys/signalvar.h>
60#include <sys/socket.h>
61#include <sys/socketvar.h>
62#include <sys/sx.h>
63#include <sys/sysctl.h>
64#include <sys/syslog.h>
65#include <sys/systm.h>
66
67#include <vm/uma.h>
68
69#include <net/if.h>
70#include <net/route.h>
71
72#include <netinet/in.h>
73#include <netinet/in_kdtrace.h>
71#include <netinet/in_pcb.h>
72#include <netinet/in_systm.h>
73#include <netinet/in_var.h>
74#include <netinet/ip.h>
75#ifdef INET6
76#include <netinet/ip6.h>
77#endif
78#include <netinet/ip_icmp.h>
79#include <netinet/icmp_var.h>
80#include <netinet/ip_var.h>
81#include <netinet/ip_options.h>
82#ifdef INET6
83#include <netinet6/ip6_var.h>
84#endif
85#include <netinet/udp.h>
86#include <netinet/udp_var.h>
87
88#ifdef IPSEC
89#include <netipsec/ipsec.h>
90#include <netipsec/esp.h>
91#endif
92
93#include <machine/in_cksum.h>
94
95#include <security/mac/mac_framework.h>
96
97/*
98 * UDP protocol implementation.
99 * Per RFC 768, August, 1980.
100 */
101
102/*
103 * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums
104 * removes the only data integrity mechanism for packets, so malformed
105 * packets that would otherwise be discarded due to bad checksums may
106 * cause problems (especially for NFS data blocks).
107 */
108VNET_DEFINE(int, udp_cksum) = 1;
109SYSCTL_VNET_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
110 &VNET_NAME(udp_cksum), 0, "compute udp checksum");
111
112int udp_log_in_vain = 0;
113SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
114 &udp_log_in_vain, 0, "Log all incoming UDP packets");
115
116VNET_DEFINE(int, udp_blackhole) = 0;
117SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
118 &VNET_NAME(udp_blackhole), 0,
119 "Do not send port unreachables for refused connects");
120
121u_long udp_sendspace = 9216; /* really max datagram size */
122 /* 40 1K datagrams */
123SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
124 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
125
126u_long udp_recvspace = 40 * (1024 +
127#ifdef INET6
128 sizeof(struct sockaddr_in6)
129#else
130 sizeof(struct sockaddr_in)
131#endif
132 );
133
134SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
135 &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
136
137VNET_DEFINE(struct inpcbhead, udb); /* from udp_var.h */
138VNET_DEFINE(struct inpcbinfo, udbinfo);
139static VNET_DEFINE(uma_zone_t, udpcb_zone);
140#define V_udpcb_zone VNET(udpcb_zone)
141
142#ifndef UDBHASHSIZE
143#define UDBHASHSIZE 128
144#endif
145
146VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
147VNET_PCPUSTAT_SYSINIT(udpstat);
148SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
149 udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
150
151#ifdef VIMAGE
152VNET_PCPUSTAT_SYSUNINIT(udpstat);
153#endif /* VIMAGE */
154#ifdef INET
155static void udp_detach(struct socket *so);
156static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
157 struct mbuf *, struct thread *);
158#endif
159
160#ifdef IPSEC
161#ifdef IPSEC_NAT_T
162#define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
163#ifdef INET
164static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
165#endif
166#endif /* IPSEC_NAT_T */
167#endif /* IPSEC */
168
169static void
170udp_zone_change(void *tag)
171{
172
173 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
174 uma_zone_set_max(V_udpcb_zone, maxsockets);
175}
176
177static int
178udp_inpcb_init(void *mem, int size, int flags)
179{
180 struct inpcb *inp;
181
182 inp = mem;
183 INP_LOCK_INIT(inp, "inp", "udpinp");
184 return (0);
185}
186
187void
188udp_init(void)
189{
190
191 in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
192 "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE,
193 IPI_HASHFIELDS_2TUPLE);
194 V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
195 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
196 uma_zone_set_max(V_udpcb_zone, maxsockets);
197 uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
198 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
199 EVENTHANDLER_PRI_ANY);
200}
201
202/*
203 * Kernel module interface for updating udpstat. The argument is an index
204 * into udpstat treated as an array of counters. While this encodes the
205 * general layout of udpstat into the caller, it doesn't encode its location,
206 * so that future changes to add, for example, per-CPU stats support won't
207 * cause binary compatibility problems for kernel modules.
208 */
209void
210kmod_udpstat_inc(int statnum)
211{
212
213 counter_u64_add(VNET(udpstat)[statnum], 1);
214}
215
216int
217udp_newudpcb(struct inpcb *inp)
218{
219 struct udpcb *up;
220
221 up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
222 if (up == NULL)
223 return (ENOBUFS);
224 inp->inp_ppcb = up;
225 return (0);
226}
227
228void
229udp_discardcb(struct udpcb *up)
230{
231
232 uma_zfree(V_udpcb_zone, up);
233}
234
235#ifdef VIMAGE
236void
237udp_destroy(void)
238{
239
240 in_pcbinfo_destroy(&V_udbinfo);
241 uma_zdestroy(V_udpcb_zone);
242}
243#endif
244
245#ifdef INET
246/*
247 * Subroutine of udp_input(), which appends the provided mbuf chain to the
248 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
249 * contains the source address. If the socket ends up being an IPv6 socket,
250 * udp_append() will convert to a sockaddr_in6 before passing the address
251 * into the socket code.
252 */
253static void
254udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
255 struct sockaddr_in *udp_in)
256{
257 struct sockaddr *append_sa;
258 struct socket *so;
259 struct mbuf *opts = 0;
260#ifdef INET6
261 struct sockaddr_in6 udp_in6;
262#endif
263 struct udpcb *up;
264
265 INP_LOCK_ASSERT(inp);
266
267 /*
268 * Engage the tunneling protocol.
269 */
270 up = intoudpcb(inp);
271 if (up->u_tun_func != NULL) {
272 (*up->u_tun_func)(n, off, inp);
273 return;
274 }
275
276 if (n == NULL)
277 return;
278
279 off += sizeof(struct udphdr);
280
281#ifdef IPSEC
282 /* Check AH/ESP integrity. */
283 if (ipsec4_in_reject(n, inp)) {
284 m_freem(n);
285 IPSECSTAT_INC(ips_in_polvio);
286 return;
287 }
288#ifdef IPSEC_NAT_T
289 up = intoudpcb(inp);
290 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
291 if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
292 n = udp4_espdecap(inp, n, off);
293 if (n == NULL) /* Consumed. */
294 return;
295 }
296#endif /* IPSEC_NAT_T */
297#endif /* IPSEC */
298#ifdef MAC
299 if (mac_inpcb_check_deliver(inp, n) != 0) {
300 m_freem(n);
301 return;
302 }
303#endif /* MAC */
304 if (inp->inp_flags & INP_CONTROLOPTS ||
305 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
306#ifdef INET6
307 if (inp->inp_vflag & INP_IPV6)
308 (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
309 else
310#endif /* INET6 */
311 ip_savecontrol(inp, &opts, ip, n);
312 }
313#ifdef INET6
314 if (inp->inp_vflag & INP_IPV6) {
315 bzero(&udp_in6, sizeof(udp_in6));
316 udp_in6.sin6_len = sizeof(udp_in6);
317 udp_in6.sin6_family = AF_INET6;
318 in6_sin_2_v4mapsin6(udp_in, &udp_in6);
319 append_sa = (struct sockaddr *)&udp_in6;
320 } else
321#endif /* INET6 */
322 append_sa = (struct sockaddr *)udp_in;
323 m_adj(n, off);
324
325 so = inp->inp_socket;
326 SOCKBUF_LOCK(&so->so_rcv);
327 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
328 SOCKBUF_UNLOCK(&so->so_rcv);
329 m_freem(n);
330 if (opts)
331 m_freem(opts);
332 UDPSTAT_INC(udps_fullsock);
333 } else
334 sorwakeup_locked(so);
335}
336
337void
338udp_input(struct mbuf *m, int off)
339{
340 int iphlen = off;
341 struct ip *ip;
342 struct udphdr *uh;
343 struct ifnet *ifp;
344 struct inpcb *inp;
345 uint16_t len, ip_len;
346 struct ip save_ip;
347 struct sockaddr_in udp_in;
348 struct m_tag *fwd_tag;
349
350 ifp = m->m_pkthdr.rcvif;
351 UDPSTAT_INC(udps_ipackets);
352
353 /*
354 * Strip IP options, if any; should skip this, make available to
355 * user, and use on returned packets, but we don't yet have a way to
356 * check the checksum with options still present.
357 */
358 if (iphlen > sizeof (struct ip)) {
359 ip_stripoptions(m);
360 iphlen = sizeof(struct ip);
361 }
362
363 /*
364 * Get IP and UDP header together in first mbuf.
365 */
366 ip = mtod(m, struct ip *);
367 if (m->m_len < iphlen + sizeof(struct udphdr)) {
368 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
369 UDPSTAT_INC(udps_hdrops);
370 return;
371 }
372 ip = mtod(m, struct ip *);
373 }
374 uh = (struct udphdr *)((caddr_t)ip + iphlen);
375
376 /*
377 * Destination port of 0 is illegal, based on RFC768.
378 */
379 if (uh->uh_dport == 0)
380 goto badunlocked;
381
382 /*
383 * Construct sockaddr format source address. Stuff source address
384 * and datagram in user buffer.
385 */
386 bzero(&udp_in, sizeof(udp_in));
387 udp_in.sin_len = sizeof(udp_in);
388 udp_in.sin_family = AF_INET;
389 udp_in.sin_port = uh->uh_sport;
390 udp_in.sin_addr = ip->ip_src;
391
392 /*
393 * Make mbuf data length reflect UDP length. If not enough data to
394 * reflect UDP length, drop.
395 */
396 len = ntohs((u_short)uh->uh_ulen);
397 ip_len = ntohs(ip->ip_len) - iphlen;
398 if (ip_len != len) {
399 if (len > ip_len || len < sizeof(struct udphdr)) {
400 UDPSTAT_INC(udps_badlen);
401 goto badunlocked;
402 }
403 m_adj(m, len - ip_len);
404 }
405
406 /*
407 * Save a copy of the IP header in case we want to restore it for
408 * sending an ICMP error message in response.
409 */
410 if (!V_udp_blackhole)
411 save_ip = *ip;
412 else
413 memset(&save_ip, 0, sizeof(save_ip));
414
415 /*
416 * Checksum extended UDP header and data.
417 */
418 if (uh->uh_sum) {
419 u_short uh_sum;
420
421 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
422 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
423 uh_sum = m->m_pkthdr.csum_data;
424 else
425 uh_sum = in_pseudo(ip->ip_src.s_addr,
426 ip->ip_dst.s_addr, htonl((u_short)len +
427 m->m_pkthdr.csum_data + IPPROTO_UDP));
428 uh_sum ^= 0xffff;
429 } else {
430 char b[9];
431
432 bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
433 bzero(((struct ipovly *)ip)->ih_x1, 9);
434 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
435 uh_sum = in_cksum(m, len + sizeof (struct ip));
436 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
437 }
438 if (uh_sum) {
439 UDPSTAT_INC(udps_badsum);
440 m_freem(m);
441 return;
442 }
443 } else
444 UDPSTAT_INC(udps_nosum);
445
446 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
447 in_broadcast(ip->ip_dst, ifp)) {
448 struct inpcb *last;
449 struct ip_moptions *imo;
450
451 INP_INFO_RLOCK(&V_udbinfo);
452 last = NULL;
453 LIST_FOREACH(inp, &V_udb, inp_list) {
454 if (inp->inp_lport != uh->uh_dport)
455 continue;
456#ifdef INET6
457 if ((inp->inp_vflag & INP_IPV4) == 0)
458 continue;
459#endif
460 if (inp->inp_laddr.s_addr != INADDR_ANY &&
461 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
462 continue;
463 if (inp->inp_faddr.s_addr != INADDR_ANY &&
464 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
465 continue;
466 if (inp->inp_fport != 0 &&
467 inp->inp_fport != uh->uh_sport)
468 continue;
469
470 INP_RLOCK(inp);
471
472 /*
473 * XXXRW: Because we weren't holding either the inpcb
474 * or the hash lock when we checked for a match
475 * before, we should probably recheck now that the
476 * inpcb lock is held.
477 */
478
479 /*
480 * Handle socket delivery policy for any-source
481 * and source-specific multicast. [RFC3678]
482 */
483 imo = inp->inp_moptions;
484 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
485 struct sockaddr_in group;
486 int blocked;
487 if (imo == NULL) {
488 INP_RUNLOCK(inp);
489 continue;
490 }
491 bzero(&group, sizeof(struct sockaddr_in));
492 group.sin_len = sizeof(struct sockaddr_in);
493 group.sin_family = AF_INET;
494 group.sin_addr = ip->ip_dst;
495
496 blocked = imo_multi_filter(imo, ifp,
497 (struct sockaddr *)&group,
498 (struct sockaddr *)&udp_in);
499 if (blocked != MCAST_PASS) {
500 if (blocked == MCAST_NOTGMEMBER)
501 IPSTAT_INC(ips_notmember);
502 if (blocked == MCAST_NOTSMEMBER ||
503 blocked == MCAST_MUTED)
504 UDPSTAT_INC(udps_filtermcast);
505 INP_RUNLOCK(inp);
506 continue;
507 }
508 }
509 if (last != NULL) {
510 struct mbuf *n;
511
512 n = m_copy(m, 0, M_COPYALL);
513 udp_append(last, ip, n, iphlen, &udp_in);
514 INP_RUNLOCK(last);
515 }
516 last = inp;
517 /*
518 * Don't look for additional matches if this one does
519 * not have either the SO_REUSEPORT or SO_REUSEADDR
520 * socket options set. This heuristic avoids
521 * searching through all pcbs in the common case of a
522 * non-shared port. It assumes that an application
523 * will never clear these options after setting them.
524 */
525 if ((last->inp_socket->so_options &
526 (SO_REUSEPORT|SO_REUSEADDR)) == 0)
527 break;
528 }
529
530 if (last == NULL) {
531 /*
532 * No matching pcb found; discard datagram. (No need
533 * to send an ICMP Port Unreachable for a broadcast
534 * or multicast datagram.)
535 */
536 UDPSTAT_INC(udps_noportbcast);
537 if (inp)
538 INP_RUNLOCK(inp);
539 INP_INFO_RUNLOCK(&V_udbinfo);
540 goto badunlocked;
541 }
542 udp_append(last, ip, m, iphlen, &udp_in);
543 INP_RUNLOCK(last);
544 INP_INFO_RUNLOCK(&V_udbinfo);
545 return;
546 }
547
548 /*
549 * Locate pcb for datagram.
550 */
551
552 /*
553 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
554 */
555 if ((m->m_flags & M_IP_NEXTHOP) &&
556 (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
557 struct sockaddr_in *next_hop;
558
559 next_hop = (struct sockaddr_in *)(fwd_tag + 1);
560
561 /*
562 * Transparently forwarded. Pretend to be the destination.
563 * Already got one like this?
564 */
565 inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
566 ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
567 if (!inp) {
568 /*
569 * It's new. Try to find the ambushing socket.
570 * Because we've rewritten the destination address,
571 * any hardware-generated hash is ignored.
572 */
573 inp = in_pcblookup(&V_udbinfo, ip->ip_src,
574 uh->uh_sport, next_hop->sin_addr,
575 next_hop->sin_port ? htons(next_hop->sin_port) :
576 uh->uh_dport, INPLOOKUP_WILDCARD |
577 INPLOOKUP_RLOCKPCB, ifp);
578 }
579 /* Remove the tag from the packet. We don't need it anymore. */
580 m_tag_delete(m, fwd_tag);
581 m->m_flags &= ~M_IP_NEXTHOP;
582 } else
583 inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
584 ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
585 INPLOOKUP_RLOCKPCB, ifp, m);
586 if (inp == NULL) {
587 if (udp_log_in_vain) {
588 char buf[4*sizeof "123"];
589
590 strcpy(buf, inet_ntoa(ip->ip_dst));
591 log(LOG_INFO,
592 "Connection attempt to UDP %s:%d from %s:%d\n",
593 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
594 ntohs(uh->uh_sport));
595 }
596 UDPSTAT_INC(udps_noport);
597 if (m->m_flags & (M_BCAST | M_MCAST)) {
598 UDPSTAT_INC(udps_noportbcast);
599 goto badunlocked;
600 }
601 if (V_udp_blackhole)
602 goto badunlocked;
603 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
604 goto badunlocked;
605 *ip = save_ip;
606 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
607 return;
608 }
609
610 /*
611 * Check the minimum TTL for socket.
612 */
613 INP_RLOCK_ASSERT(inp);
614 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
615 INP_RUNLOCK(inp);
616 m_freem(m);
617 return;
618 }
74#include <netinet/in_pcb.h>
75#include <netinet/in_systm.h>
76#include <netinet/in_var.h>
77#include <netinet/ip.h>
78#ifdef INET6
79#include <netinet/ip6.h>
80#endif
81#include <netinet/ip_icmp.h>
82#include <netinet/icmp_var.h>
83#include <netinet/ip_var.h>
84#include <netinet/ip_options.h>
85#ifdef INET6
86#include <netinet6/ip6_var.h>
87#endif
88#include <netinet/udp.h>
89#include <netinet/udp_var.h>
90
91#ifdef IPSEC
92#include <netipsec/ipsec.h>
93#include <netipsec/esp.h>
94#endif
95
96#include <machine/in_cksum.h>
97
98#include <security/mac/mac_framework.h>
99
100/*
101 * UDP protocol implementation.
102 * Per RFC 768, August, 1980.
103 */
104
105/*
106 * BSD 4.2 defaulted the udp checksum to be off. Turning off udp checksums
107 * removes the only data integrity mechanism for packets, so malformed
108 * packets that would otherwise be discarded due to bad checksums may
109 * cause problems (especially for NFS data blocks).
110 */
111VNET_DEFINE(int, udp_cksum) = 1;
112SYSCTL_VNET_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
113 &VNET_NAME(udp_cksum), 0, "compute udp checksum");
114
115int udp_log_in_vain = 0;
116SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
117 &udp_log_in_vain, 0, "Log all incoming UDP packets");
118
119VNET_DEFINE(int, udp_blackhole) = 0;
120SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
121 &VNET_NAME(udp_blackhole), 0,
122 "Do not send port unreachables for refused connects");
123
124u_long udp_sendspace = 9216; /* really max datagram size */
125 /* 40 1K datagrams */
126SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
127 &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
128
129u_long udp_recvspace = 40 * (1024 +
130#ifdef INET6
131 sizeof(struct sockaddr_in6)
132#else
133 sizeof(struct sockaddr_in)
134#endif
135 );
136
137SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
138 &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
139
140VNET_DEFINE(struct inpcbhead, udb); /* from udp_var.h */
141VNET_DEFINE(struct inpcbinfo, udbinfo);
142static VNET_DEFINE(uma_zone_t, udpcb_zone);
143#define V_udpcb_zone VNET(udpcb_zone)
144
145#ifndef UDBHASHSIZE
146#define UDBHASHSIZE 128
147#endif
148
149VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat); /* from udp_var.h */
150VNET_PCPUSTAT_SYSINIT(udpstat);
151SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
152 udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
153
154#ifdef VIMAGE
155VNET_PCPUSTAT_SYSUNINIT(udpstat);
156#endif /* VIMAGE */
157#ifdef INET
158static void udp_detach(struct socket *so);
159static int udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
160 struct mbuf *, struct thread *);
161#endif
162
163#ifdef IPSEC
164#ifdef IPSEC_NAT_T
165#define UF_ESPINUDP_ALL (UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
166#ifdef INET
167static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
168#endif
169#endif /* IPSEC_NAT_T */
170#endif /* IPSEC */
171
172static void
173udp_zone_change(void *tag)
174{
175
176 uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
177 uma_zone_set_max(V_udpcb_zone, maxsockets);
178}
179
180static int
181udp_inpcb_init(void *mem, int size, int flags)
182{
183 struct inpcb *inp;
184
185 inp = mem;
186 INP_LOCK_INIT(inp, "inp", "udpinp");
187 return (0);
188}
189
190void
191udp_init(void)
192{
193
194 in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
195 "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE,
196 IPI_HASHFIELDS_2TUPLE);
197 V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
198 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
199 uma_zone_set_max(V_udpcb_zone, maxsockets);
200 uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
201 EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
202 EVENTHANDLER_PRI_ANY);
203}
204
205/*
206 * Kernel module interface for updating udpstat. The argument is an index
207 * into udpstat treated as an array of counters. While this encodes the
208 * general layout of udpstat into the caller, it doesn't encode its location,
209 * so that future changes to add, for example, per-CPU stats support won't
210 * cause binary compatibility problems for kernel modules.
211 */
212void
213kmod_udpstat_inc(int statnum)
214{
215
216 counter_u64_add(VNET(udpstat)[statnum], 1);
217}
218
219int
220udp_newudpcb(struct inpcb *inp)
221{
222 struct udpcb *up;
223
224 up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
225 if (up == NULL)
226 return (ENOBUFS);
227 inp->inp_ppcb = up;
228 return (0);
229}
230
231void
232udp_discardcb(struct udpcb *up)
233{
234
235 uma_zfree(V_udpcb_zone, up);
236}
237
238#ifdef VIMAGE
239void
240udp_destroy(void)
241{
242
243 in_pcbinfo_destroy(&V_udbinfo);
244 uma_zdestroy(V_udpcb_zone);
245}
246#endif
247
248#ifdef INET
249/*
250 * Subroutine of udp_input(), which appends the provided mbuf chain to the
251 * passed pcb/socket. The caller must provide a sockaddr_in via udp_in that
252 * contains the source address. If the socket ends up being an IPv6 socket,
253 * udp_append() will convert to a sockaddr_in6 before passing the address
254 * into the socket code.
255 */
256static void
257udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
258 struct sockaddr_in *udp_in)
259{
260 struct sockaddr *append_sa;
261 struct socket *so;
262 struct mbuf *opts = 0;
263#ifdef INET6
264 struct sockaddr_in6 udp_in6;
265#endif
266 struct udpcb *up;
267
268 INP_LOCK_ASSERT(inp);
269
270 /*
271 * Engage the tunneling protocol.
272 */
273 up = intoudpcb(inp);
274 if (up->u_tun_func != NULL) {
275 (*up->u_tun_func)(n, off, inp);
276 return;
277 }
278
279 if (n == NULL)
280 return;
281
282 off += sizeof(struct udphdr);
283
284#ifdef IPSEC
285 /* Check AH/ESP integrity. */
286 if (ipsec4_in_reject(n, inp)) {
287 m_freem(n);
288 IPSECSTAT_INC(ips_in_polvio);
289 return;
290 }
291#ifdef IPSEC_NAT_T
292 up = intoudpcb(inp);
293 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
294 if (up->u_flags & UF_ESPINUDP_ALL) { /* IPSec UDP encaps. */
295 n = udp4_espdecap(inp, n, off);
296 if (n == NULL) /* Consumed. */
297 return;
298 }
299#endif /* IPSEC_NAT_T */
300#endif /* IPSEC */
301#ifdef MAC
302 if (mac_inpcb_check_deliver(inp, n) != 0) {
303 m_freem(n);
304 return;
305 }
306#endif /* MAC */
307 if (inp->inp_flags & INP_CONTROLOPTS ||
308 inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
309#ifdef INET6
310 if (inp->inp_vflag & INP_IPV6)
311 (void)ip6_savecontrol_v4(inp, n, &opts, NULL);
312 else
313#endif /* INET6 */
314 ip_savecontrol(inp, &opts, ip, n);
315 }
316#ifdef INET6
317 if (inp->inp_vflag & INP_IPV6) {
318 bzero(&udp_in6, sizeof(udp_in6));
319 udp_in6.sin6_len = sizeof(udp_in6);
320 udp_in6.sin6_family = AF_INET6;
321 in6_sin_2_v4mapsin6(udp_in, &udp_in6);
322 append_sa = (struct sockaddr *)&udp_in6;
323 } else
324#endif /* INET6 */
325 append_sa = (struct sockaddr *)udp_in;
326 m_adj(n, off);
327
328 so = inp->inp_socket;
329 SOCKBUF_LOCK(&so->so_rcv);
330 if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
331 SOCKBUF_UNLOCK(&so->so_rcv);
332 m_freem(n);
333 if (opts)
334 m_freem(opts);
335 UDPSTAT_INC(udps_fullsock);
336 } else
337 sorwakeup_locked(so);
338}
339
340void
341udp_input(struct mbuf *m, int off)
342{
343 int iphlen = off;
344 struct ip *ip;
345 struct udphdr *uh;
346 struct ifnet *ifp;
347 struct inpcb *inp;
348 uint16_t len, ip_len;
349 struct ip save_ip;
350 struct sockaddr_in udp_in;
351 struct m_tag *fwd_tag;
352
353 ifp = m->m_pkthdr.rcvif;
354 UDPSTAT_INC(udps_ipackets);
355
356 /*
357 * Strip IP options, if any; should skip this, make available to
358 * user, and use on returned packets, but we don't yet have a way to
359 * check the checksum with options still present.
360 */
361 if (iphlen > sizeof (struct ip)) {
362 ip_stripoptions(m);
363 iphlen = sizeof(struct ip);
364 }
365
366 /*
367 * Get IP and UDP header together in first mbuf.
368 */
369 ip = mtod(m, struct ip *);
370 if (m->m_len < iphlen + sizeof(struct udphdr)) {
371 if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
372 UDPSTAT_INC(udps_hdrops);
373 return;
374 }
375 ip = mtod(m, struct ip *);
376 }
377 uh = (struct udphdr *)((caddr_t)ip + iphlen);
378
379 /*
380 * Destination port of 0 is illegal, based on RFC768.
381 */
382 if (uh->uh_dport == 0)
383 goto badunlocked;
384
385 /*
386 * Construct sockaddr format source address. Stuff source address
387 * and datagram in user buffer.
388 */
389 bzero(&udp_in, sizeof(udp_in));
390 udp_in.sin_len = sizeof(udp_in);
391 udp_in.sin_family = AF_INET;
392 udp_in.sin_port = uh->uh_sport;
393 udp_in.sin_addr = ip->ip_src;
394
395 /*
396 * Make mbuf data length reflect UDP length. If not enough data to
397 * reflect UDP length, drop.
398 */
399 len = ntohs((u_short)uh->uh_ulen);
400 ip_len = ntohs(ip->ip_len) - iphlen;
401 if (ip_len != len) {
402 if (len > ip_len || len < sizeof(struct udphdr)) {
403 UDPSTAT_INC(udps_badlen);
404 goto badunlocked;
405 }
406 m_adj(m, len - ip_len);
407 }
408
409 /*
410 * Save a copy of the IP header in case we want to restore it for
411 * sending an ICMP error message in response.
412 */
413 if (!V_udp_blackhole)
414 save_ip = *ip;
415 else
416 memset(&save_ip, 0, sizeof(save_ip));
417
418 /*
419 * Checksum extended UDP header and data.
420 */
421 if (uh->uh_sum) {
422 u_short uh_sum;
423
424 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
425 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
426 uh_sum = m->m_pkthdr.csum_data;
427 else
428 uh_sum = in_pseudo(ip->ip_src.s_addr,
429 ip->ip_dst.s_addr, htonl((u_short)len +
430 m->m_pkthdr.csum_data + IPPROTO_UDP));
431 uh_sum ^= 0xffff;
432 } else {
433 char b[9];
434
435 bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
436 bzero(((struct ipovly *)ip)->ih_x1, 9);
437 ((struct ipovly *)ip)->ih_len = uh->uh_ulen;
438 uh_sum = in_cksum(m, len + sizeof (struct ip));
439 bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
440 }
441 if (uh_sum) {
442 UDPSTAT_INC(udps_badsum);
443 m_freem(m);
444 return;
445 }
446 } else
447 UDPSTAT_INC(udps_nosum);
448
449 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
450 in_broadcast(ip->ip_dst, ifp)) {
451 struct inpcb *last;
452 struct ip_moptions *imo;
453
454 INP_INFO_RLOCK(&V_udbinfo);
455 last = NULL;
456 LIST_FOREACH(inp, &V_udb, inp_list) {
457 if (inp->inp_lport != uh->uh_dport)
458 continue;
459#ifdef INET6
460 if ((inp->inp_vflag & INP_IPV4) == 0)
461 continue;
462#endif
463 if (inp->inp_laddr.s_addr != INADDR_ANY &&
464 inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
465 continue;
466 if (inp->inp_faddr.s_addr != INADDR_ANY &&
467 inp->inp_faddr.s_addr != ip->ip_src.s_addr)
468 continue;
469 if (inp->inp_fport != 0 &&
470 inp->inp_fport != uh->uh_sport)
471 continue;
472
473 INP_RLOCK(inp);
474
475 /*
476 * XXXRW: Because we weren't holding either the inpcb
477 * or the hash lock when we checked for a match
478 * before, we should probably recheck now that the
479 * inpcb lock is held.
480 */
481
482 /*
483 * Handle socket delivery policy for any-source
484 * and source-specific multicast. [RFC3678]
485 */
486 imo = inp->inp_moptions;
487 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
488 struct sockaddr_in group;
489 int blocked;
490 if (imo == NULL) {
491 INP_RUNLOCK(inp);
492 continue;
493 }
494 bzero(&group, sizeof(struct sockaddr_in));
495 group.sin_len = sizeof(struct sockaddr_in);
496 group.sin_family = AF_INET;
497 group.sin_addr = ip->ip_dst;
498
499 blocked = imo_multi_filter(imo, ifp,
500 (struct sockaddr *)&group,
501 (struct sockaddr *)&udp_in);
502 if (blocked != MCAST_PASS) {
503 if (blocked == MCAST_NOTGMEMBER)
504 IPSTAT_INC(ips_notmember);
505 if (blocked == MCAST_NOTSMEMBER ||
506 blocked == MCAST_MUTED)
507 UDPSTAT_INC(udps_filtermcast);
508 INP_RUNLOCK(inp);
509 continue;
510 }
511 }
512 if (last != NULL) {
513 struct mbuf *n;
514
515 n = m_copy(m, 0, M_COPYALL);
516 udp_append(last, ip, n, iphlen, &udp_in);
517 INP_RUNLOCK(last);
518 }
519 last = inp;
520 /*
521 * Don't look for additional matches if this one does
522 * not have either the SO_REUSEPORT or SO_REUSEADDR
523 * socket options set. This heuristic avoids
524 * searching through all pcbs in the common case of a
525 * non-shared port. It assumes that an application
526 * will never clear these options after setting them.
527 */
528 if ((last->inp_socket->so_options &
529 (SO_REUSEPORT|SO_REUSEADDR)) == 0)
530 break;
531 }
532
533 if (last == NULL) {
534 /*
535 * No matching pcb found; discard datagram. (No need
536 * to send an ICMP Port Unreachable for a broadcast
537 * or multicast datagram.)
538 */
539 UDPSTAT_INC(udps_noportbcast);
540 if (inp)
541 INP_RUNLOCK(inp);
542 INP_INFO_RUNLOCK(&V_udbinfo);
543 goto badunlocked;
544 }
545 udp_append(last, ip, m, iphlen, &udp_in);
546 INP_RUNLOCK(last);
547 INP_INFO_RUNLOCK(&V_udbinfo);
548 return;
549 }
550
551 /*
552 * Locate pcb for datagram.
553 */
554
555 /*
556 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
557 */
558 if ((m->m_flags & M_IP_NEXTHOP) &&
559 (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
560 struct sockaddr_in *next_hop;
561
562 next_hop = (struct sockaddr_in *)(fwd_tag + 1);
563
564 /*
565 * Transparently forwarded. Pretend to be the destination.
566 * Already got one like this?
567 */
568 inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
569 ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
570 if (!inp) {
571 /*
572 * It's new. Try to find the ambushing socket.
573 * Because we've rewritten the destination address,
574 * any hardware-generated hash is ignored.
575 */
576 inp = in_pcblookup(&V_udbinfo, ip->ip_src,
577 uh->uh_sport, next_hop->sin_addr,
578 next_hop->sin_port ? htons(next_hop->sin_port) :
579 uh->uh_dport, INPLOOKUP_WILDCARD |
580 INPLOOKUP_RLOCKPCB, ifp);
581 }
582 /* Remove the tag from the packet. We don't need it anymore. */
583 m_tag_delete(m, fwd_tag);
584 m->m_flags &= ~M_IP_NEXTHOP;
585 } else
586 inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
587 ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
588 INPLOOKUP_RLOCKPCB, ifp, m);
589 if (inp == NULL) {
590 if (udp_log_in_vain) {
591 char buf[4*sizeof "123"];
592
593 strcpy(buf, inet_ntoa(ip->ip_dst));
594 log(LOG_INFO,
595 "Connection attempt to UDP %s:%d from %s:%d\n",
596 buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
597 ntohs(uh->uh_sport));
598 }
599 UDPSTAT_INC(udps_noport);
600 if (m->m_flags & (M_BCAST | M_MCAST)) {
601 UDPSTAT_INC(udps_noportbcast);
602 goto badunlocked;
603 }
604 if (V_udp_blackhole)
605 goto badunlocked;
606 if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
607 goto badunlocked;
608 *ip = save_ip;
609 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
610 return;
611 }
612
613 /*
614 * Check the minimum TTL for socket.
615 */
616 INP_RLOCK_ASSERT(inp);
617 if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
618 INP_RUNLOCK(inp);
619 m_freem(m);
620 return;
621 }
622
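	/* Fire the DTrace/SDT probe for a received UDP datagram (new in r254889). */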
623 UDP_PROBE(receive, NULL, inp, ip, ip, uh);
619 udp_append(inp, ip, m, iphlen, &udp_in);
620 INP_RUNLOCK(inp);
621 return;
622
623badunlocked:
624 m_freem(m);
625}
626#endif /* INET */
627
628/*
629 * Notify a udp user of an asynchronous error; just wake up so that they can
630 * collect error status.
631 */
632struct inpcb *
633udp_notify(struct inpcb *inp, int errno)
634{
635
636 /*
637 * While udp_ctlinput() always calls udp_notify() with a read lock
638 * when invoking it directly, in_pcbnotifyall() currently uses write
639 * locks due to sharing code with TCP. For now, accept either a read
640 * or a write lock, but a read lock is sufficient.
641 */
642 INP_LOCK_ASSERT(inp);
643
644 inp->inp_socket->so_error = errno;
645 sorwakeup(inp->inp_socket);
646 sowwakeup(inp->inp_socket);
647 return (inp);
648}
649
650#ifdef INET
651void
652udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
653{
654 struct ip *ip = vip;
655 struct udphdr *uh;
656 struct in_addr faddr;
657 struct inpcb *inp;
658
659 faddr = ((struct sockaddr_in *)sa)->sin_addr;
660 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
661 return;
662
663 /*
664 * Redirects don't need to be handled up here.
665 */
666 if (PRC_IS_REDIRECT(cmd))
667 return;
668
669 /*
670 * Hostdead is ugly because it goes linearly through all PCBs.
671 *
672 * XXX: We never get this from ICMP, otherwise it makes an excellent
673 * DoS attack on machines with many connections.
674 */
675 if (cmd == PRC_HOSTDEAD)
676 ip = NULL;
677 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
678 return;
679 if (ip != NULL) {
680 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
681 inp = in_pcblookup(&V_udbinfo, faddr, uh->uh_dport,
682 ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
683 if (inp != NULL) {
684 INP_RLOCK_ASSERT(inp);
685 if (inp->inp_socket != NULL) {
686 udp_notify(inp, inetctlerrmap[cmd]);
687 }
688 INP_RUNLOCK(inp);
689 }
690 } else
691 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
692 udp_notify);
693}
694#endif /* INET */
695
696static int
697udp_pcblist(SYSCTL_HANDLER_ARGS)
698{
699 int error, i, n;
700 struct inpcb *inp, **inp_list;
701 inp_gen_t gencnt;
702 struct xinpgen xig;
703
704 /*
705 * The process of preparing the PCB list is too time-consuming and
706 * resource-intensive to repeat twice on every request.
707 */
708 if (req->oldptr == 0) {
709 n = V_udbinfo.ipi_count;
710 n += imax(n / 8, 10);
711 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
712 return (0);
713 }
714
715 if (req->newptr != 0)
716 return (EPERM);
717
718 /*
719 * OK, now we're committed to doing something.
720 */
721 INP_INFO_RLOCK(&V_udbinfo);
722 gencnt = V_udbinfo.ipi_gencnt;
723 n = V_udbinfo.ipi_count;
724 INP_INFO_RUNLOCK(&V_udbinfo);
725
726 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
727 + n * sizeof(struct xinpcb));
728 if (error != 0)
729 return (error);
730
731 xig.xig_len = sizeof xig;
732 xig.xig_count = n;
733 xig.xig_gen = gencnt;
734 xig.xig_sogen = so_gencnt;
735 error = SYSCTL_OUT(req, &xig, sizeof xig);
736 if (error)
737 return (error);
738
739 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
740 if (inp_list == 0)
741 return (ENOMEM);
742
743 INP_INFO_RLOCK(&V_udbinfo);
744 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
745 inp = LIST_NEXT(inp, inp_list)) {
746 INP_WLOCK(inp);
747 if (inp->inp_gencnt <= gencnt &&
748 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
749 in_pcbref(inp);
750 inp_list[i++] = inp;
751 }
752 INP_WUNLOCK(inp);
753 }
754 INP_INFO_RUNLOCK(&V_udbinfo);
755 n = i;
756
757 error = 0;
758 for (i = 0; i < n; i++) {
759 inp = inp_list[i];
760 INP_RLOCK(inp);
761 if (inp->inp_gencnt <= gencnt) {
762 struct xinpcb xi;
763
764 bzero(&xi, sizeof(xi));
765 xi.xi_len = sizeof xi;
766 /* XXX should avoid extra copy */
767 bcopy(inp, &xi.xi_inp, sizeof *inp);
768 if (inp->inp_socket)
769 sotoxsocket(inp->inp_socket, &xi.xi_socket);
770 xi.xi_inp.inp_gencnt = inp->inp_gencnt;
771 INP_RUNLOCK(inp);
772 error = SYSCTL_OUT(req, &xi, sizeof xi);
773 } else
774 INP_RUNLOCK(inp);
775 }
776 INP_INFO_WLOCK(&V_udbinfo);
777 for (i = 0; i < n; i++) {
778 inp = inp_list[i];
779 INP_RLOCK(inp);
780 if (!in_pcbrele_rlocked(inp))
781 INP_RUNLOCK(inp);
782 }
783 INP_INFO_WUNLOCK(&V_udbinfo);
784
785 if (!error) {
786 /*
787 * Give the user an updated idea of our state. If the
788 * generation differs from what we told her before, she knows
789 * that something happened while we were processing this
790 * request, and it might be necessary to retry.
791 */
792 INP_INFO_RLOCK(&V_udbinfo);
793 xig.xig_gen = V_udbinfo.ipi_gencnt;
794 xig.xig_sogen = so_gencnt;
795 xig.xig_count = V_udbinfo.ipi_count;
796 INP_INFO_RUNLOCK(&V_udbinfo);
797 error = SYSCTL_OUT(req, &xig, sizeof xig);
798 }
799 free(inp_list, M_TEMP);
800 return (error);
801}
802
803SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
804 CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
805 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
806
807#ifdef INET
808static int
809udp_getcred(SYSCTL_HANDLER_ARGS)
810{
811 struct xucred xuc;
812 struct sockaddr_in addrs[2];
813 struct inpcb *inp;
814 int error;
815
816 error = priv_check(req->td, PRIV_NETINET_GETCRED);
817 if (error)
818 return (error);
819 error = SYSCTL_IN(req, addrs, sizeof(addrs));
820 if (error)
821 return (error);
822 inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
823 addrs[0].sin_addr, addrs[0].sin_port,
824 INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
825 if (inp != NULL) {
826 INP_RLOCK_ASSERT(inp);
827 if (inp->inp_socket == NULL)
828 error = ENOENT;
829 if (error == 0)
830 error = cr_canseeinpcb(req->td->td_ucred, inp);
831 if (error == 0)
832 cru2x(inp->inp_cred, &xuc);
833 INP_RUNLOCK(inp);
834 } else
835 error = ENOENT;
836 if (error == 0)
837 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
838 return (error);
839}
840
841SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
842 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
843 udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
844#endif /* INET */
845
846int
847udp_ctloutput(struct socket *so, struct sockopt *sopt)
848{
849 int error = 0, optval;
850 struct inpcb *inp;
851#ifdef IPSEC_NAT_T
852 struct udpcb *up;
853#endif
854
855 inp = sotoinpcb(so);
856 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
857 INP_WLOCK(inp);
858 if (sopt->sopt_level != IPPROTO_UDP) {
859#ifdef INET6
860 if (INP_CHECK_SOCKAF(so, AF_INET6)) {
861 INP_WUNLOCK(inp);
862 error = ip6_ctloutput(so, sopt);
863 }
864#endif
865#if defined(INET) && defined(INET6)
866 else
867#endif
868#ifdef INET
869 {
870 INP_WUNLOCK(inp);
871 error = ip_ctloutput(so, sopt);
872 }
873#endif
874 return (error);
875 }
876
877 switch (sopt->sopt_dir) {
878 case SOPT_SET:
879 switch (sopt->sopt_name) {
880 case UDP_ENCAP:
881 INP_WUNLOCK(inp);
882 error = sooptcopyin(sopt, &optval, sizeof optval,
883 sizeof optval);
884 if (error)
885 break;
886 inp = sotoinpcb(so);
887 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
888 INP_WLOCK(inp);
889#ifdef IPSEC_NAT_T
890 up = intoudpcb(inp);
891 KASSERT(up != NULL, ("%s: up == NULL", __func__));
892#endif
893 switch (optval) {
894 case 0:
895 /* Clear all UDP encap. */
896#ifdef IPSEC_NAT_T
897 up->u_flags &= ~UF_ESPINUDP_ALL;
898#endif
899 break;
900#ifdef IPSEC_NAT_T
901 case UDP_ENCAP_ESPINUDP:
902 case UDP_ENCAP_ESPINUDP_NON_IKE:
903 up->u_flags &= ~UF_ESPINUDP_ALL;
904 if (optval == UDP_ENCAP_ESPINUDP)
905 up->u_flags |= UF_ESPINUDP;
906 else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
907 up->u_flags |= UF_ESPINUDP_NON_IKE;
908 break;
909#endif
910 default:
911 error = EINVAL;
912 break;
913 }
914 INP_WUNLOCK(inp);
915 break;
916 default:
917 INP_WUNLOCK(inp);
918 error = ENOPROTOOPT;
919 break;
920 }
921 break;
922 case SOPT_GET:
923 switch (sopt->sopt_name) {
924#ifdef IPSEC_NAT_T
925 case UDP_ENCAP:
926 up = intoudpcb(inp);
927 KASSERT(up != NULL, ("%s: up == NULL", __func__));
928 optval = up->u_flags & UF_ESPINUDP_ALL;
929 INP_WUNLOCK(inp);
930 error = sooptcopyout(sopt, &optval, sizeof optval);
931 break;
932#endif
933 default:
934 INP_WUNLOCK(inp);
935 error = ENOPROTOOPT;
936 break;
937 }
938 break;
939 }
940 return (error);
941}
942
943#ifdef INET
944#define UH_WLOCKED 2
945#define UH_RLOCKED 1
946#define UH_UNLOCKED 0
947static int
948udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
949 struct mbuf *control, struct thread *td)
950{
951 struct udpiphdr *ui;
952 int len = m->m_pkthdr.len;
953 struct in_addr faddr, laddr;
954 struct cmsghdr *cm;
955 struct sockaddr_in *sin, src;
956 int error = 0;
957 int ipflags;
958 u_short fport, lport;
959 int unlock_udbinfo;
960 u_char tos;
961
962 /*
963 * udp_output() may need to temporarily bind or connect the current
964 * inpcb. As such, we don't know up front whether we will need the
965 * pcbinfo lock or not. Do any work to decide what is needed up
966 * front before acquiring any locks.
967 */
968 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
969 if (control)
970 m_freem(control);
971 m_freem(m);
972 return (EMSGSIZE);
973 }
974
975 src.sin_family = 0;
976 INP_RLOCK(inp);
977 tos = inp->inp_ip_tos;
978 if (control != NULL) {
979 /*
980 * XXX: Currently, we assume all the optional information is
981 * stored in a single mbuf.
982 */
983 if (control->m_next) {
984 INP_RUNLOCK(inp);
985 m_freem(control);
986 m_freem(m);
987 return (EINVAL);
988 }
989 for (; control->m_len > 0;
990 control->m_data += CMSG_ALIGN(cm->cmsg_len),
991 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
992 cm = mtod(control, struct cmsghdr *);
993 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
994 || cm->cmsg_len > control->m_len) {
995 error = EINVAL;
996 break;
997 }
998 if (cm->cmsg_level != IPPROTO_IP)
999 continue;
1000
1001 switch (cm->cmsg_type) {
1002 case IP_SENDSRCADDR:
1003 if (cm->cmsg_len !=
1004 CMSG_LEN(sizeof(struct in_addr))) {
1005 error = EINVAL;
1006 break;
1007 }
1008 bzero(&src, sizeof(src));
1009 src.sin_family = AF_INET;
1010 src.sin_len = sizeof(src);
1011 src.sin_port = inp->inp_lport;
1012 src.sin_addr =
1013 *(struct in_addr *)CMSG_DATA(cm);
1014 break;
1015
1016 case IP_TOS:
1017 if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
1018 error = EINVAL;
1019 break;
1020 }
1021 tos = *(u_char *)CMSG_DATA(cm);
1022 break;
1023
1024 default:
1025 error = ENOPROTOOPT;
1026 break;
1027 }
1028 if (error)
1029 break;
1030 }
1031 m_freem(control);
1032 }
1033 if (error) {
1034 INP_RUNLOCK(inp);
1035 m_freem(m);
1036 return (error);
1037 }
1038
1039 /*
1040 * Depending on whether or not the application has bound or connected
1041 * the socket, we may have to do varying levels of work. The optimal
1042 * case is for a connected UDP socket, as a global lock isn't
1043 * required at all.
1044 *
1045 * In order to decide which we need, we require stability of the
1046 * inpcb binding, which we ensure by acquiring a read lock on the
1047 * inpcb. This doesn't strictly follow the lock order, so we play
1048 * the trylock and retry game; note that we may end up with more
1049 * conservative locks than required the second time around, so later
1050 * assertions have to accept that. Further analysis of the number of
1051 * misses under contention is required.
1052 *
1053 * XXXRW: Check that hash locking update here is correct.
1054 */
1055 sin = (struct sockaddr_in *)addr;
1056 if (sin != NULL &&
1057 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1058 INP_RUNLOCK(inp);
1059 INP_WLOCK(inp);
1060 INP_HASH_WLOCK(&V_udbinfo);
1061 unlock_udbinfo = UH_WLOCKED;
1062 } else if ((sin != NULL && (
1063 (sin->sin_addr.s_addr == INADDR_ANY) ||
1064 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1065 (inp->inp_laddr.s_addr == INADDR_ANY) ||
1066 (inp->inp_lport == 0))) ||
1067 (src.sin_family == AF_INET)) {
1068 INP_HASH_RLOCK(&V_udbinfo);
1069 unlock_udbinfo = UH_RLOCKED;
1070 } else
1071 unlock_udbinfo = UH_UNLOCKED;
1072
1073 /*
1074 * If the IP_SENDSRCADDR control message was specified, override the
1075 * source address for this datagram. Its use is invalidated if the
1076 * address thus specified is incomplete or clobbers other inpcbs.
1077 */
1078 laddr = inp->inp_laddr;
1079 lport = inp->inp_lport;
1080 if (src.sin_family == AF_INET) {
1081 INP_HASH_LOCK_ASSERT(&V_udbinfo);
1082 if ((lport == 0) ||
1083 (laddr.s_addr == INADDR_ANY &&
1084 src.sin_addr.s_addr == INADDR_ANY)) {
1085 error = EINVAL;
1086 goto release;
1087 }
1088 error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1089 &laddr.s_addr, &lport, td->td_ucred);
1090 if (error)
1091 goto release;
1092 }
1093
1094 /*
1095 * If a UDP socket has been connected, then a local address/port will
1096 * have been selected and bound.
1097 *
1098 * If a UDP socket has not been connected to, then an explicit
1099 * destination address must be used, in which case a local
1100 * address/port may not have been selected and bound.
1101 */
1102 if (sin != NULL) {
1103 INP_LOCK_ASSERT(inp);
1104 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1105 error = EISCONN;
1106 goto release;
1107 }
1108
1109 /*
1110 * Jail may rewrite the destination address, so let it do
1111 * that before we use it.
1112 */
1113 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1114 if (error)
1115 goto release;
1116
1117 /*
1118 * If a local address or port hasn't yet been selected, or if
1119 * the destination address needs to be rewritten due to using
1120 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1121 * to do the heavy lifting. Once a port is selected, we
1122 * commit the binding back to the socket; we also commit the
1123 * binding of the address if in jail.
1124 *
1125 * If we already have a valid binding and we're not
1126 * requesting a destination address rewrite, use a fast path.
1127 */
1128 if (inp->inp_laddr.s_addr == INADDR_ANY ||
1129 inp->inp_lport == 0 ||
1130 sin->sin_addr.s_addr == INADDR_ANY ||
1131 sin->sin_addr.s_addr == INADDR_BROADCAST) {
1132 INP_HASH_LOCK_ASSERT(&V_udbinfo);
1133 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1134 &lport, &faddr.s_addr, &fport, NULL,
1135 td->td_ucred);
1136 if (error)
1137 goto release;
1138
1139 /*
1140 * XXXRW: Why not commit the port if the address is
1141 * !INADDR_ANY?
1142 */
1143 /* Commit the local port if newly assigned. */
1144 if (inp->inp_laddr.s_addr == INADDR_ANY &&
1145 inp->inp_lport == 0) {
1146 INP_WLOCK_ASSERT(inp);
1147 INP_HASH_WLOCK_ASSERT(&V_udbinfo);
1148 /*
1149 * Remember addr if jailed, to prevent
1150 * rebinding.
1151 */
1152 if (prison_flag(td->td_ucred, PR_IP4))
1153 inp->inp_laddr = laddr;
1154 inp->inp_lport = lport;
1155 if (in_pcbinshash(inp) != 0) {
1156 inp->inp_lport = 0;
1157 error = EAGAIN;
1158 goto release;
1159 }
1160 inp->inp_flags |= INP_ANONPORT;
1161 }
1162 } else {
1163 faddr = sin->sin_addr;
1164 fport = sin->sin_port;
1165 }
1166 } else {
1167 INP_LOCK_ASSERT(inp);
1168 faddr = inp->inp_faddr;
1169 fport = inp->inp_fport;
1170 if (faddr.s_addr == INADDR_ANY) {
1171 error = ENOTCONN;
1172 goto release;
1173 }
1174 }
1175
1176 /*
1177 * Calculate data length and get a mbuf for UDP, IP, and possible
1178 * link-layer headers. Immediately slide the data pointer forward again
1179 * since we won't use that space at this layer.
1180 */
1181 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
1182 if (m == NULL) {
1183 error = ENOBUFS;
1184 goto release;
1185 }
1186 m->m_data += max_linkhdr;
1187 m->m_len -= max_linkhdr;
1188 m->m_pkthdr.len -= max_linkhdr;
1189
1190 /*
1191 * Fill in mbuf with extended UDP header and addresses and length put
1192 * into network format.
1193 */
1194 ui = mtod(m, struct udpiphdr *);
1195 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
624 udp_append(inp, ip, m, iphlen, &udp_in);
625 INP_RUNLOCK(inp);
626 return;
627
628badunlocked:
629 m_freem(m);
630}
631#endif /* INET */
632
633/*
634 * Notify a udp user of an asynchronous error; just wake up so that they can
635 * collect error status.
636 */
637struct inpcb *
638udp_notify(struct inpcb *inp, int errno)
639{
640
641 /*
642 * While udp_ctlinput() always calls udp_notify() with a read lock
643 * when invoking it directly, in_pcbnotifyall() currently uses write
644 * locks due to sharing code with TCP. For now, accept either a read
645 * or a write lock, but a read lock is sufficient.
646 */
647 INP_LOCK_ASSERT(inp);
648
649 inp->inp_socket->so_error = errno;
650 sorwakeup(inp->inp_socket);
651 sowwakeup(inp->inp_socket);
652 return (inp);
653}
654
655#ifdef INET
656void
657udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
658{
659 struct ip *ip = vip;
660 struct udphdr *uh;
661 struct in_addr faddr;
662 struct inpcb *inp;
663
664 faddr = ((struct sockaddr_in *)sa)->sin_addr;
665 if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
666 return;
667
668 /*
669 * Redirects don't need to be handled up here.
670 */
671 if (PRC_IS_REDIRECT(cmd))
672 return;
673
674 /*
675 * Hostdead is ugly because it goes linearly through all PCBs.
676 *
677 * XXX: We never get this from ICMP, otherwise it makes an excellent
678 * DoS attack on machines with many connections.
679 */
680 if (cmd == PRC_HOSTDEAD)
681 ip = NULL;
682 else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
683 return;
684 if (ip != NULL) {
685 uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
686 inp = in_pcblookup(&V_udbinfo, faddr, uh->uh_dport,
687 ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
688 if (inp != NULL) {
689 INP_RLOCK_ASSERT(inp);
690 if (inp->inp_socket != NULL) {
691 udp_notify(inp, inetctlerrmap[cmd]);
692 }
693 INP_RUNLOCK(inp);
694 }
695 } else
696 in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
697 udp_notify);
698}
699#endif /* INET */
700
701static int
702udp_pcblist(SYSCTL_HANDLER_ARGS)
703{
704 int error, i, n;
705 struct inpcb *inp, **inp_list;
706 inp_gen_t gencnt;
707 struct xinpgen xig;
708
709 /*
710 * The process of preparing the PCB list is too time-consuming and
711 * resource-intensive to repeat twice on every request.
712 */
713 if (req->oldptr == 0) {
714 n = V_udbinfo.ipi_count;
715 n += imax(n / 8, 10);
716 req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
717 return (0);
718 }
719
720 if (req->newptr != 0)
721 return (EPERM);
722
723 /*
724 * OK, now we're committed to doing something.
725 */
726 INP_INFO_RLOCK(&V_udbinfo);
727 gencnt = V_udbinfo.ipi_gencnt;
728 n = V_udbinfo.ipi_count;
729 INP_INFO_RUNLOCK(&V_udbinfo);
730
731 error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
732 + n * sizeof(struct xinpcb));
733 if (error != 0)
734 return (error);
735
736 xig.xig_len = sizeof xig;
737 xig.xig_count = n;
738 xig.xig_gen = gencnt;
739 xig.xig_sogen = so_gencnt;
740 error = SYSCTL_OUT(req, &xig, sizeof xig);
741 if (error)
742 return (error);
743
744 inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
745 if (inp_list == 0)
746 return (ENOMEM);
747
748 INP_INFO_RLOCK(&V_udbinfo);
749 for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
750 inp = LIST_NEXT(inp, inp_list)) {
751 INP_WLOCK(inp);
752 if (inp->inp_gencnt <= gencnt &&
753 cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
754 in_pcbref(inp);
755 inp_list[i++] = inp;
756 }
757 INP_WUNLOCK(inp);
758 }
759 INP_INFO_RUNLOCK(&V_udbinfo);
760 n = i;
761
762 error = 0;
763 for (i = 0; i < n; i++) {
764 inp = inp_list[i];
765 INP_RLOCK(inp);
766 if (inp->inp_gencnt <= gencnt) {
767 struct xinpcb xi;
768
769 bzero(&xi, sizeof(xi));
770 xi.xi_len = sizeof xi;
771 /* XXX should avoid extra copy */
772 bcopy(inp, &xi.xi_inp, sizeof *inp);
773 if (inp->inp_socket)
774 sotoxsocket(inp->inp_socket, &xi.xi_socket);
775 xi.xi_inp.inp_gencnt = inp->inp_gencnt;
776 INP_RUNLOCK(inp);
777 error = SYSCTL_OUT(req, &xi, sizeof xi);
778 } else
779 INP_RUNLOCK(inp);
780 }
781 INP_INFO_WLOCK(&V_udbinfo);
782 for (i = 0; i < n; i++) {
783 inp = inp_list[i];
784 INP_RLOCK(inp);
785 if (!in_pcbrele_rlocked(inp))
786 INP_RUNLOCK(inp);
787 }
788 INP_INFO_WUNLOCK(&V_udbinfo);
789
790 if (!error) {
791 /*
792 * Give the user an updated idea of our state. If the
793 * generation differs from what we told her before, she knows
794 * that something happened while we were processing this
795 * request, and it might be necessary to retry.
796 */
797 INP_INFO_RLOCK(&V_udbinfo);
798 xig.xig_gen = V_udbinfo.ipi_gencnt;
799 xig.xig_sogen = so_gencnt;
800 xig.xig_count = V_udbinfo.ipi_count;
801 INP_INFO_RUNLOCK(&V_udbinfo);
802 error = SYSCTL_OUT(req, &xig, sizeof xig);
803 }
804 free(inp_list, M_TEMP);
805 return (error);
806}
807
808SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
809 CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
810 udp_pcblist, "S,xinpcb", "List of active UDP sockets");
811
812#ifdef INET
813static int
814udp_getcred(SYSCTL_HANDLER_ARGS)
815{
816 struct xucred xuc;
817 struct sockaddr_in addrs[2];
818 struct inpcb *inp;
819 int error;
820
821 error = priv_check(req->td, PRIV_NETINET_GETCRED);
822 if (error)
823 return (error);
824 error = SYSCTL_IN(req, addrs, sizeof(addrs));
825 if (error)
826 return (error);
827 inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
828 addrs[0].sin_addr, addrs[0].sin_port,
829 INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
830 if (inp != NULL) {
831 INP_RLOCK_ASSERT(inp);
832 if (inp->inp_socket == NULL)
833 error = ENOENT;
834 if (error == 0)
835 error = cr_canseeinpcb(req->td->td_ucred, inp);
836 if (error == 0)
837 cru2x(inp->inp_cred, &xuc);
838 INP_RUNLOCK(inp);
839 } else
840 error = ENOENT;
841 if (error == 0)
842 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
843 return (error);
844}
845
846SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
847 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
848 udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
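/*
 * Userland usage sketch (not part of this file): net.inet.udp.getcred maps a
 * (local, foreign) address pair to the credentials of the owning socket.
 * Per the in_pcblookup() call above, addrs[0] names the local end on this
 * host and addrs[1] the foreign end; the call normally requires privilege
 * (PRIV_NETINET_GETCRED).  The addresses and port below are placeholders:
 *
 *	#include <sys/types.h>
 *	#include <sys/socket.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/ucred.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct sockaddr_in addrs[2];
 *		struct xucred xuc;
 *		size_t len = sizeof(xuc);
 *
 *		memset(addrs, 0, sizeof(addrs));
 *		addrs[0].sin_family = addrs[1].sin_family = AF_INET;
 *		addrs[0].sin_len = addrs[1].sin_len = sizeof(addrs[0]);
 *		inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);
 *		addrs[0].sin_port = htons(5353);
 *		inet_pton(AF_INET, "192.0.2.2", &addrs[1].sin_addr);
 *		addrs[1].sin_port = htons(5353);
 *		if (sysctlbyname("net.inet.udp.getcred", &xuc, &len,
 *		    addrs, sizeof(addrs)) == -1)
 *			err(1, "net.inet.udp.getcred");
 *		printf("socket owned by uid %d\n", (int)xuc.cr_uid);
 *		return (0);
 *	}
 */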
849#endif /* INET */
850
851int
852udp_ctloutput(struct socket *so, struct sockopt *sopt)
853{
854 int error = 0, optval;
855 struct inpcb *inp;
856#ifdef IPSEC_NAT_T
857 struct udpcb *up;
858#endif
859
860 inp = sotoinpcb(so);
861 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
862 INP_WLOCK(inp);
863 if (sopt->sopt_level != IPPROTO_UDP) {
864#ifdef INET6
865 if (INP_CHECK_SOCKAF(so, AF_INET6)) {
866 INP_WUNLOCK(inp);
867 error = ip6_ctloutput(so, sopt);
868 }
869#endif
870#if defined(INET) && defined(INET6)
871 else
872#endif
873#ifdef INET
874 {
875 INP_WUNLOCK(inp);
876 error = ip_ctloutput(so, sopt);
877 }
878#endif
879 return (error);
880 }
881
882 switch (sopt->sopt_dir) {
883 case SOPT_SET:
884 switch (sopt->sopt_name) {
885 case UDP_ENCAP:
886 INP_WUNLOCK(inp);
887 error = sooptcopyin(sopt, &optval, sizeof optval,
888 sizeof optval);
889 if (error)
890 break;
891 inp = sotoinpcb(so);
892 KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
893 INP_WLOCK(inp);
894#ifdef IPSEC_NAT_T
895 up = intoudpcb(inp);
896 KASSERT(up != NULL, ("%s: up == NULL", __func__));
897#endif
898 switch (optval) {
899 case 0:
900 /* Clear all UDP encap. */
901#ifdef IPSEC_NAT_T
902 up->u_flags &= ~UF_ESPINUDP_ALL;
903#endif
904 break;
905#ifdef IPSEC_NAT_T
906 case UDP_ENCAP_ESPINUDP:
907 case UDP_ENCAP_ESPINUDP_NON_IKE:
908 up->u_flags &= ~UF_ESPINUDP_ALL;
909 if (optval == UDP_ENCAP_ESPINUDP)
910 up->u_flags |= UF_ESPINUDP;
911 else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
912 up->u_flags |= UF_ESPINUDP_NON_IKE;
913 break;
914#endif
915 default:
916 error = EINVAL;
917 break;
918 }
919 INP_WUNLOCK(inp);
920 break;
921 default:
922 INP_WUNLOCK(inp);
923 error = ENOPROTOOPT;
924 break;
925 }
926 break;
927 case SOPT_GET:
928 switch (sopt->sopt_name) {
929#ifdef IPSEC_NAT_T
930 case UDP_ENCAP:
931 up = intoudpcb(inp);
932 KASSERT(up != NULL, ("%s: up == NULL", __func__));
933 optval = up->u_flags & UF_ESPINUDP_ALL;
934 INP_WUNLOCK(inp);
935 error = sooptcopyout(sopt, &optval, sizeof optval);
936 break;
937#endif
938 default:
939 INP_WUNLOCK(inp);
940 error = ENOPROTOOPT;
941 break;
942 }
943 break;
944 }
945 return (error);
946}
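/*
 * Userland usage sketch (not part of this file): the only UDP-level option
 * handled above is UDP_ENCAP, which enables IPsec NAT-T decapsulation on a
 * UDP socket "s"; it only has an effect on kernels built with IPSEC_NAT_T.
 * Assuming the UDP_ENCAP* constants from <netinet/udp.h> and err(3) for
 * error handling:
 *
 *	int opt = UDP_ENCAP_ESPINUDP;
 *	socklen_t optlen = sizeof(opt);
 *
 *	if (setsockopt(s, IPPROTO_UDP, UDP_ENCAP, &opt, sizeof(opt)) == -1)
 *		err(1, "setsockopt(UDP_ENCAP)");
 *	if (getsockopt(s, IPPROTO_UDP, UDP_ENCAP, &opt, &optlen) == -1)
 *		err(1, "getsockopt(UDP_ENCAP)");
 */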
947
948#ifdef INET
949#define UH_WLOCKED 2
950#define UH_RLOCKED 1
951#define UH_UNLOCKED 0
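/*
 * udp_output() tracks how the V_udbinfo hash lock is held via the UH_*
 * values above: write-locked when the inpcb may have to be bound or rebound,
 * read-locked when a lookup against the hash is needed (an explicit
 * destination or IP_SENDSRCADDR on an incompletely bound socket), and
 * unlocked for the connected-socket fast path.
 */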
952static int
953udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
954 struct mbuf *control, struct thread *td)
955{
956 struct udpiphdr *ui;
957 int len = m->m_pkthdr.len;
958 struct in_addr faddr, laddr;
959 struct cmsghdr *cm;
960 struct sockaddr_in *sin, src;
961 int error = 0;
962 int ipflags;
963 u_short fport, lport;
964 int unlock_udbinfo;
965 u_char tos;
966
967 /*
968 * udp_output() may need to temporarily bind or connect the current
969	 * inpcb.  As such, we don't know up front whether we will need the
970	 * pcbinfo lock or not.  Do whatever work is needed to make that
971	 * decision before acquiring any locks.
972 */
973 if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
974 if (control)
975 m_freem(control);
976 m_freem(m);
977 return (EMSGSIZE);
978 }
979
980 src.sin_family = 0;
981 INP_RLOCK(inp);
982 tos = inp->inp_ip_tos;
983 if (control != NULL) {
984 /*
985 * XXX: Currently, we assume all the optional information is
986 * stored in a single mbuf.
987 */
988 if (control->m_next) {
989 INP_RUNLOCK(inp);
990 m_freem(control);
991 m_freem(m);
992 return (EINVAL);
993 }
994 for (; control->m_len > 0;
995 control->m_data += CMSG_ALIGN(cm->cmsg_len),
996 control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
997 cm = mtod(control, struct cmsghdr *);
998 if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
999 || cm->cmsg_len > control->m_len) {
1000 error = EINVAL;
1001 break;
1002 }
1003 if (cm->cmsg_level != IPPROTO_IP)
1004 continue;
1005
1006 switch (cm->cmsg_type) {
1007 case IP_SENDSRCADDR:
1008 if (cm->cmsg_len !=
1009 CMSG_LEN(sizeof(struct in_addr))) {
1010 error = EINVAL;
1011 break;
1012 }
1013 bzero(&src, sizeof(src));
1014 src.sin_family = AF_INET;
1015 src.sin_len = sizeof(src);
1016 src.sin_port = inp->inp_lport;
1017 src.sin_addr =
1018 *(struct in_addr *)CMSG_DATA(cm);
1019 break;
1020
1021 case IP_TOS:
1022 if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
1023 error = EINVAL;
1024 break;
1025 }
1026 tos = *(u_char *)CMSG_DATA(cm);
1027 break;
1028
1029 default:
1030 error = ENOPROTOOPT;
1031 break;
1032 }
1033 if (error)
1034 break;
1035 }
1036 m_freem(control);
1037 }
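	/*
	 * Userland usage sketch (not part of this file): the control messages
	 * parsed above are built with the CMSG_* macros.  For example, to
	 * choose a per-datagram source address on a socket bound to
	 * INADDR_ANY -- assuming "msg" already carries the destination in
	 * msg_name and the payload in msg_iov, and "srcaddr" holds the
	 * desired local address:
	 *
	 *	char cbuf[CMSG_SPACE(sizeof(struct in_addr))];
	 *	struct cmsghdr *cmp;
	 *
	 *	msg.msg_control = cbuf;
	 *	msg.msg_controllen = sizeof(cbuf);
	 *	cmp = CMSG_FIRSTHDR(&msg);
	 *	cmp->cmsg_level = IPPROTO_IP;
	 *	cmp->cmsg_type = IP_SENDSRCADDR;
	 *	cmp->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
	 *	memcpy(CMSG_DATA(cmp), &srcaddr, sizeof(srcaddr));
	 *	if (sendmsg(s, &msg, 0) == -1)
	 *		err(1, "sendmsg");
	 *
	 * IP_TOS is supplied the same way with a single u_char payload.
	 * Note that IP_SENDSRCADDR requires a bound local port; see the
	 * checks below.
	 */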
1038 if (error) {
1039 INP_RUNLOCK(inp);
1040 m_freem(m);
1041 return (error);
1042 }
1043
1044 /*
1045 * Depending on whether or not the application has bound or connected
1046 * the socket, we may have to do varying levels of work. The optimal
1047 * case is for a connected UDP socket, as a global lock isn't
1048 * required at all.
1049 *
1050 * In order to decide which we need, we require stability of the
1051 * inpcb binding, which we ensure by acquiring a read lock on the
1052 * inpcb. This doesn't strictly follow the lock order, so we play
1053 * the trylock and retry game; note that we may end up with more
1054 * conservative locks than required the second time around, so later
1055 * assertions have to accept that. Further analysis of the number of
1056 * misses under contention is required.
1057 *
1058 * XXXRW: Check that hash locking update here is correct.
1059 */
1060 sin = (struct sockaddr_in *)addr;
1061 if (sin != NULL &&
1062 (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1063 INP_RUNLOCK(inp);
1064 INP_WLOCK(inp);
1065 INP_HASH_WLOCK(&V_udbinfo);
1066 unlock_udbinfo = UH_WLOCKED;
1067 } else if ((sin != NULL && (
1068 (sin->sin_addr.s_addr == INADDR_ANY) ||
1069 (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1070 (inp->inp_laddr.s_addr == INADDR_ANY) ||
1071 (inp->inp_lport == 0))) ||
1072 (src.sin_family == AF_INET)) {
1073 INP_HASH_RLOCK(&V_udbinfo);
1074 unlock_udbinfo = UH_RLOCKED;
1075 } else
1076 unlock_udbinfo = UH_UNLOCKED;
1077
1078 /*
1079 * If the IP_SENDSRCADDR control message was specified, override the
1080	 * source address for this datagram.  The request is rejected if the
1081	 * address thus specified is incomplete or would clobber other inpcbs.
1082 */
1083 laddr = inp->inp_laddr;
1084 lport = inp->inp_lport;
1085 if (src.sin_family == AF_INET) {
1086 INP_HASH_LOCK_ASSERT(&V_udbinfo);
1087 if ((lport == 0) ||
1088 (laddr.s_addr == INADDR_ANY &&
1089 src.sin_addr.s_addr == INADDR_ANY)) {
1090 error = EINVAL;
1091 goto release;
1092 }
1093 error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1094 &laddr.s_addr, &lport, td->td_ucred);
1095 if (error)
1096 goto release;
1097 }
1098
1099 /*
1100 * If a UDP socket has been connected, then a local address/port will
1101 * have been selected and bound.
1102 *
1103	 * If a UDP socket has not been connected, then an explicit
1104	 * destination address must be supplied, in which case a local
1105	 * address/port may not yet have been selected and bound.
1106 */
1107 if (sin != NULL) {
1108 INP_LOCK_ASSERT(inp);
1109 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1110 error = EISCONN;
1111 goto release;
1112 }
1113
1114 /*
1115 * Jail may rewrite the destination address, so let it do
1116 * that before we use it.
1117 */
1118 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1119 if (error)
1120 goto release;
1121
1122 /*
1123 * If a local address or port hasn't yet been selected, or if
1124 * the destination address needs to be rewritten due to using
1125 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1126 * to do the heavy lifting. Once a port is selected, we
1127 * commit the binding back to the socket; we also commit the
1128 * binding of the address if in jail.
1129 *
1130 * If we already have a valid binding and we're not
1131 * requesting a destination address rewrite, use a fast path.
1132 */
1133 if (inp->inp_laddr.s_addr == INADDR_ANY ||
1134 inp->inp_lport == 0 ||
1135 sin->sin_addr.s_addr == INADDR_ANY ||
1136 sin->sin_addr.s_addr == INADDR_BROADCAST) {
1137 INP_HASH_LOCK_ASSERT(&V_udbinfo);
1138 error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1139 &lport, &faddr.s_addr, &fport, NULL,
1140 td->td_ucred);
1141 if (error)
1142 goto release;
1143
1144 /*
1145 * XXXRW: Why not commit the port if the address is
1146 * !INADDR_ANY?
1147 */
1148 /* Commit the local port if newly assigned. */
1149 if (inp->inp_laddr.s_addr == INADDR_ANY &&
1150 inp->inp_lport == 0) {
1151 INP_WLOCK_ASSERT(inp);
1152 INP_HASH_WLOCK_ASSERT(&V_udbinfo);
1153 /*
1154 * Remember addr if jailed, to prevent
1155 * rebinding.
1156 */
1157 if (prison_flag(td->td_ucred, PR_IP4))
1158 inp->inp_laddr = laddr;
1159 inp->inp_lport = lport;
1160 if (in_pcbinshash(inp) != 0) {
1161 inp->inp_lport = 0;
1162 error = EAGAIN;
1163 goto release;
1164 }
1165 inp->inp_flags |= INP_ANONPORT;
1166 }
1167 } else {
1168 faddr = sin->sin_addr;
1169 fport = sin->sin_port;
1170 }
1171 } else {
1172 INP_LOCK_ASSERT(inp);
1173 faddr = inp->inp_faddr;
1174 fport = inp->inp_fport;
1175 if (faddr.s_addr == INADDR_ANY) {
1176 error = ENOTCONN;
1177 goto release;
1178 }
1179 }
1180
1181 /*
1182	 * Calculate the data length and get an mbuf for the UDP, IP, and
1183	 * possible link-layer headers.  Immediately slide the data pointer
1184	 * forward again, since we won't use that space at this layer.
1185 */
1186 M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
1187 if (m == NULL) {
1188 error = ENOBUFS;
1189 goto release;
1190 }
1191 m->m_data += max_linkhdr;
1192 m->m_len -= max_linkhdr;
1193 m->m_pkthdr.len -= max_linkhdr;
1194
1195 /*
1196	 * Fill in the mbuf with an extended UDP header, with the addresses
1197	 * and length in network byte order.
1198 */
1199 ui = mtod(m, struct udpiphdr *);
1200 bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */
1201 ui->ui_v = IPVERSION << 4;
1202 ui->ui_pr = IPPROTO_UDP;
1203 ui->ui_src = laddr;
1204 ui->ui_dst = faddr;
1205 ui->ui_sport = lport;
1206 ui->ui_dport = fport;
1207 ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1208
1209 /*
1210 * Set the Don't Fragment bit in the IP header.
1211 */
1212 if (inp->inp_flags & INP_DONTFRAG) {
1213 struct ip *ip;
1214
1215 ip = (struct ip *)&ui->ui_i;
1216 ip->ip_off |= htons(IP_DF);
1217 }
1218
1219 ipflags = 0;
1220 if (inp->inp_socket->so_options & SO_DONTROUTE)
1221 ipflags |= IP_ROUTETOIF;
1222 if (inp->inp_socket->so_options & SO_BROADCAST)
1223 ipflags |= IP_ALLOWBROADCAST;
1224 if (inp->inp_flags & INP_ONESBCAST)
1225 ipflags |= IP_SENDONES;
1226
1227#ifdef MAC
1228 mac_inpcb_create_mbuf(inp, m);
1229#endif
1230
1231 /*
1232 * Set up checksum and output datagram.
1233 */
1234 if (V_udp_cksum) {
1235 if (inp->inp_flags & INP_ONESBCAST)
1236 faddr.s_addr = INADDR_BROADCAST;
1237 ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1238 htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1239 m->m_pkthdr.csum_flags = CSUM_UDP;
1240 m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1241 } else
1242 ui->ui_sum = 0;
1243 ((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
1244 ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */
1245 ((struct ip *)ui)->ip_tos = tos; /* XXX */
1246 UDPSTAT_INC(udps_opackets);
1247
1248 if (unlock_udbinfo == UH_WLOCKED)
1249 INP_HASH_WUNLOCK(&V_udbinfo);
1250 else if (unlock_udbinfo == UH_RLOCKED)
1251 INP_HASH_RUNLOCK(&V_udbinfo);
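	/*
	 * Fire the DTrace UDP send probe with the IP and UDP headers just
	 * before handing the datagram to ip_output().
	 */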
1252 UDP_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u);
1253 error = ip_output(m, inp->inp_options, NULL, ipflags,
1254 inp->inp_moptions, inp);
1255 if (unlock_udbinfo == UH_WLOCKED)
1256 INP_WUNLOCK(inp);
1257 else
1258 INP_RUNLOCK(inp);
1259 return (error);
1260
1261release:
1262 if (unlock_udbinfo == UH_WLOCKED) {
1263 INP_HASH_WUNLOCK(&V_udbinfo);
1264 INP_WUNLOCK(inp);
1265 } else if (unlock_udbinfo == UH_RLOCKED) {
1266 INP_HASH_RUNLOCK(&V_udbinfo);
1267 INP_RUNLOCK(inp);
1268 } else
1269 INP_RUNLOCK(inp);
1270 m_freem(m);
1271 return (error);
1272}
1273
1274
1275#if defined(IPSEC) && defined(IPSEC_NAT_T)
1276/*
1277 * Potentially decapsulate ESP in a UDP frame: check for an ESP header
1278 * and optional marker; if present, strip the UDP header and
1279 * push the result through IPsec.
1280 *
1281 * Returns the mbuf to be processed (potentially re-allocated) or
1282 * NULL if it was consumed and/or processed.
1283 */
1284static struct mbuf *
1285udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1286{
1287 size_t minlen, payload, skip, iphlen;
1288 caddr_t data;
1289 struct udpcb *up;
1290 struct m_tag *tag;
1291 struct udphdr *udphdr;
1292 struct ip *ip;
1293
1294 INP_RLOCK_ASSERT(inp);
1295
1296 /*
1297 * Pull up data so the longest case is contiguous:
1298 * IP/UDP hdr + non ESP marker + ESP hdr.
1299 */
1300 minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1301 if (minlen > m->m_pkthdr.len)
1302 minlen = m->m_pkthdr.len;
1303 if ((m = m_pullup(m, minlen)) == NULL) {
1304 IPSECSTAT_INC(ips_in_inval);
1305 return (NULL); /* Bypass caller processing. */
1306 }
1307 data = mtod(m, caddr_t); /* Points to ip header. */
1308 payload = m->m_len - off; /* Size of payload. */
1309
1310 if (payload == 1 && data[off] == '\xff')
1311 return (m); /* NB: keepalive packet, no decap. */
1312
1313 up = intoudpcb(inp);
1314 KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1315 KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1316 ("u_flags 0x%x", up->u_flags));
1317
1318 /*
1319 * Check that the payload is large enough to hold an
1320 * ESP header and compute the amount of data to remove.
1321 *
1322 * NB: the caller has already done a pullup for us.
1323 * XXX can we assume alignment and eliminate bcopys?
1324 */
1325 if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1326 /*
1327 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1328 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1329 * possible AH mode non-IKE marker+non-ESP marker
1330 * from draft-ietf-ipsec-udp-encaps-00.txt.
1331 */
1332 uint64_t marker;
1333
1334 if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1335 return (m); /* NB: no decap. */
1336 bcopy(data + off, &marker, sizeof(uint64_t));
1337 if (marker != 0) /* Non-IKE marker. */
1338 return (m); /* NB: no decap. */
1339 skip = sizeof(uint64_t) + sizeof(struct udphdr);
1340 } else {
1341 uint32_t spi;
1342
1343 if (payload <= sizeof(struct esp)) {
1344 IPSECSTAT_INC(ips_in_inval);
1345 m_freem(m);
1346 return (NULL); /* Discard. */
1347 }
1348 bcopy(data + off, &spi, sizeof(uint32_t));
1349 if (spi == 0) /* Non-ESP marker. */
1350 return (m); /* NB: no decap. */
1351 skip = sizeof(struct udphdr);
1352 }
1353
1354 /*
1355	 * Set up a PACKET_TAG_IPSEC_NAT_T_PORTS tag to remember
1356	 * the UDP ports.  This is required to select the right SPD
1357	 * for multiple hosts behind the same NAT.
1358 *
1359 * NB: ports are maintained in network byte order everywhere
1360 * in the NAT-T code.
1361 */
1362 tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1363 2 * sizeof(uint16_t), M_NOWAIT);
1364 if (tag == NULL) {
1365 IPSECSTAT_INC(ips_in_nomem);
1366 m_freem(m);
1367 return (NULL); /* Discard. */
1368 }
1369 iphlen = off - sizeof(struct udphdr);
1370 udphdr = (struct udphdr *)(data + iphlen);
1371 ((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1372 ((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1373 m_tag_prepend(m, tag);
1374
1375 /*
1376	 * Remove the UDP header (and possibly the non-ESP marker);
1377	 * the IP header length is iphlen.
1378 * Before:
1379 * <--- off --->
1380 * +----+------+-----+
1381 * | IP | UDP | ESP |
1382 * +----+------+-----+
1383 * <-skip->
1384 * After:
1385 * +----+-----+
1386 * | IP | ESP |
1387 * +----+-----+
1388 * <-skip->
1389 */
1390 ovbcopy(data, data + skip, iphlen);
1391 m_adj(m, skip);
1392
1393 ip = mtod(m, struct ip *);
1394 ip->ip_len = htons(ntohs(ip->ip_len) - skip);
1395 ip->ip_p = IPPROTO_ESP;
1396
1397 /*
1398	 * We cannot update the checksums yet, so clear any hardware
1399	 * checksum flags, as they are no longer valid.
1400 */
1401 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1402 m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1403
1404 (void) ipsec4_common_input(m, iphlen, ip->ip_p);
1405 return (NULL); /* NB: consumed, bypass processing. */
1406}
1407#endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1408
1409static void
1410udp_abort(struct socket *so)
1411{
1412 struct inpcb *inp;
1413
1414 inp = sotoinpcb(so);
1415 KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1416 INP_WLOCK(inp);
1417 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1418 INP_HASH_WLOCK(&V_udbinfo);
1419 in_pcbdisconnect(inp);
1420 inp->inp_laddr.s_addr = INADDR_ANY;
1421 INP_HASH_WUNLOCK(&V_udbinfo);
1422 soisdisconnected(so);
1423 }
1424 INP_WUNLOCK(inp);
1425}
1426
1427static int
1428udp_attach(struct socket *so, int proto, struct thread *td)
1429{
1430 struct inpcb *inp;
1431 int error;
1432
1433 inp = sotoinpcb(so);
1434 KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1435 error = soreserve(so, udp_sendspace, udp_recvspace);
1436 if (error)
1437 return (error);
1438 INP_INFO_WLOCK(&V_udbinfo);
1439 error = in_pcballoc(so, &V_udbinfo);
1440 if (error) {
1441 INP_INFO_WUNLOCK(&V_udbinfo);
1442 return (error);
1443 }
1444
1445 inp = sotoinpcb(so);
1446 inp->inp_vflag |= INP_IPV4;
1447 inp->inp_ip_ttl = V_ip_defttl;
1448
1449 error = udp_newudpcb(inp);
1450 if (error) {
1451 in_pcbdetach(inp);
1452 in_pcbfree(inp);
1453 INP_INFO_WUNLOCK(&V_udbinfo);
1454 return (error);
1455 }
1456
1457 INP_WUNLOCK(inp);
1458 INP_INFO_WUNLOCK(&V_udbinfo);
1459 return (0);
1460}
1461#endif /* INET */
1462
1463int
1464udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1465{
1466 struct inpcb *inp;
1467 struct udpcb *up;
1468
1469 KASSERT(so->so_type == SOCK_DGRAM,
1470 ("udp_set_kernel_tunneling: !dgram"));
1471 inp = sotoinpcb(so);
1472 KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1473 INP_WLOCK(inp);
1474 up = intoudpcb(inp);
1475 if (up->u_tun_func != NULL) {
1476 INP_WUNLOCK(inp);
1477 return (EBUSY);
1478 }
1479 up->u_tun_func = f;
1480 INP_WUNLOCK(inp);
1481 return (0);
1482}
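/*
 * In-kernel usage sketch (not part of this file): a subsystem that owns a
 * UDP socket "so" may install a receive hook with udp_set_kernel_tunneling();
 * a return value of EBUSY means a hook is already installed.  The callback
 * prototype below assumes the udp_tun_func_t typedef from <netinet/udp_var.h>
 * in this tree; "my_tun_input" is a hypothetical consumer that simply frees
 * the datagram where a real one would decapsulate it:
 *
 *	static void
 *	my_tun_input(struct mbuf *m, int off, struct inpcb *inp)
 *	{
 *
 *		m_freem(m);
 *	}
 *
 *	error = udp_set_kernel_tunneling(so, my_tun_input);
 */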
1483
1484#ifdef INET
1485static int
1486udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1487{
1488 struct inpcb *inp;
1489 int error;
1490
1491 inp = sotoinpcb(so);
1492 KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1493 INP_WLOCK(inp);
1494 INP_HASH_WLOCK(&V_udbinfo);
1495 error = in_pcbbind(inp, nam, td->td_ucred);
1496 INP_HASH_WUNLOCK(&V_udbinfo);
1497 INP_WUNLOCK(inp);
1498 return (error);
1499}
1500
1501static void
1502udp_close(struct socket *so)
1503{
1504 struct inpcb *inp;
1505
1506 inp = sotoinpcb(so);
1507 KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1508 INP_WLOCK(inp);
1509 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1510 INP_HASH_WLOCK(&V_udbinfo);
1511 in_pcbdisconnect(inp);
1512 inp->inp_laddr.s_addr = INADDR_ANY;
1513 INP_HASH_WUNLOCK(&V_udbinfo);
1514 soisdisconnected(so);
1515 }
1516 INP_WUNLOCK(inp);
1517}
1518
1519static int
1520udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1521{
1522 struct inpcb *inp;
1523 int error;
1524 struct sockaddr_in *sin;
1525
1526 inp = sotoinpcb(so);
1527 KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1528 INP_WLOCK(inp);
1529 if (inp->inp_faddr.s_addr != INADDR_ANY) {
1530 INP_WUNLOCK(inp);
1531 return (EISCONN);
1532 }
1533 sin = (struct sockaddr_in *)nam;
1534 error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1535 if (error != 0) {
1536 INP_WUNLOCK(inp);
1537 return (error);
1538 }
1539 INP_HASH_WLOCK(&V_udbinfo);
1540 error = in_pcbconnect(inp, nam, td->td_ucred);
1541 INP_HASH_WUNLOCK(&V_udbinfo);
1542 if (error == 0)
1543 soisconnected(so);
1544 INP_WUNLOCK(inp);
1545 return (error);
1546}
1547
1548static void
1549udp_detach(struct socket *so)
1550{
1551 struct inpcb *inp;
1552 struct udpcb *up;
1553
1554 inp = sotoinpcb(so);
1555 KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1556 KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1557 ("udp_detach: not disconnected"));
1558 INP_INFO_WLOCK(&V_udbinfo);
1559 INP_WLOCK(inp);
1560 up = intoudpcb(inp);
1561 KASSERT(up != NULL, ("%s: up == NULL", __func__));
1562 inp->inp_ppcb = NULL;
1563 in_pcbdetach(inp);
1564 in_pcbfree(inp);
1565 INP_INFO_WUNLOCK(&V_udbinfo);
1566 udp_discardcb(up);
1567}
1568
1569static int
1570udp_disconnect(struct socket *so)
1571{
1572 struct inpcb *inp;
1573
1574 inp = sotoinpcb(so);
1575 KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1576 INP_WLOCK(inp);
1577 if (inp->inp_faddr.s_addr == INADDR_ANY) {
1578 INP_WUNLOCK(inp);
1579 return (ENOTCONN);
1580 }
1581 INP_HASH_WLOCK(&V_udbinfo);
1582 in_pcbdisconnect(inp);
1583 inp->inp_laddr.s_addr = INADDR_ANY;
1584 INP_HASH_WUNLOCK(&V_udbinfo);
1585 SOCK_LOCK(so);
1586 so->so_state &= ~SS_ISCONNECTED; /* XXX */
1587 SOCK_UNLOCK(so);
1588 INP_WUNLOCK(inp);
1589 return (0);
1590}
1591
1592static int
1593udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1594 struct mbuf *control, struct thread *td)
1595{
1596 struct inpcb *inp;
1597
1598 inp = sotoinpcb(so);
1599 KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1600 return (udp_output(inp, m, addr, control, td));
1601}
1602#endif /* INET */
1603
1604int
1605udp_shutdown(struct socket *so)
1606{
1607 struct inpcb *inp;
1608
1609 inp = sotoinpcb(so);
1610 KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1611 INP_WLOCK(inp);
1612 socantsendmore(so);
1613 INP_WUNLOCK(inp);
1614 return (0);
1615}
1616
1617#ifdef INET
1618struct pr_usrreqs udp_usrreqs = {
1619 .pru_abort = udp_abort,
1620 .pru_attach = udp_attach,
1621 .pru_bind = udp_bind,
1622 .pru_connect = udp_connect,
1623 .pru_control = in_control,
1624 .pru_detach = udp_detach,
1625 .pru_disconnect = udp_disconnect,
1626 .pru_peeraddr = in_getpeeraddr,
1627 .pru_send = udp_send,
1628 .pru_soreceive = soreceive_dgram,
1629 .pru_sosend = sosend_dgram,
1630 .pru_shutdown = udp_shutdown,
1631 .pru_sockaddr = in_getsockaddr,
1632 .pru_sosetlabel = in_pcbsosetlabel,
1633 .pru_close = udp_close,
1634};
1635#endif /* INET */