/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/netinet/raw_ip.c 347685 2019-05-16 11:09:53Z tuexen $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>
#include <netinet/ip_icmp.h>

#include <netipsec/ipsec_support.h>

#include <machine/stdarg.h>
#include <security/mac/mac_framework.h>

VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(ip_defttl), 0,
    "Default TTL for outgoing IP packets");

VNET_DEFINE(struct inpcbhead, ripcb);
VNET_DEFINE(struct inpcbinfo, ripcbinfo);

#define	V_ripcb			VNET(ripcb)
#define	V_ripcbinfo		VNET(ripcbinfo)

/*
 * Control and data hooks for ipfw, dummynet, divert and so on.
 * The data hooks are not used here but it is convenient
 * to keep them all in one place.
 */
VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;

int	(*ip_dn_ctl_ptr)(struct sockopt *);
int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
void	(*ip_divert_ptr)(struct mbuf *, int);
int	(*ng_ipfw_input_p)(struct mbuf **, int,
			struct ip_fw_args *, int);

#ifdef INET
/*
 * Hooks for multicast routing. They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
VNET_DEFINE(struct socket *, ip_mrouter);

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
		   struct ip_moptions *);
int (*mrt_ioctl)(u_long, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

int (*rsvp_input_p)(struct mbuf **, int *, int);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);
#endif /* INET */

extern	struct protosw inetsw[];

u_long	rip_sendspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");

u_long	rip_recvspace = 9216;
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

/*
 * Hash functions
 */

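/*
 * Raw PCBs are hashed on (protocol, local address, foreign address).
 * INP_PCBHASH_RAW() maps a fully specified tuple into buckets 1..hashmask;
 * bucket 0 is reserved for PCBs that are not fully specified (see
 * rip_inshash()), so rip_input() can also scan it for wildcard matches.
 */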
#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
        (((proto) + (laddr) + (faddr)) % (mask) + 1)

#ifdef INET
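/*
 * Insert an inpcb into the raw PCB hash.  PCBs with a fully specified
 * (protocol, local, foreign) tuple go into the bucket selected by
 * INP_PCBHASH_RAW(); all others go into bucket 0.
 */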
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}
#endif /* INET */

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize the raw connection block queue.
 */
static void
rip_zone_change(void *tag)
{

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

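/*
 * UMA zone init routine for raw inpcbs: set up the per-inpcb lock.
 */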
static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{

	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
	    1, "ripcb", rip_inpcb_init, NULL, 0, IPI_HASHFIELDS_NONE);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

#ifdef VIMAGE
static void
rip_destroy(void *unused __unused)
{

	in_pcbinfo_destroy(&V_ripcbinfo);
}
VNET_SYSUNINIT(raw_ip, SI_SUB_PROTO_DOMAIN, SI_ORDER_FOURTH, rip_destroy, NULL);
#endif

#ifdef INET
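/*
 * Append a copy of an inbound datagram to the receive buffer of the raw
 * socket attached to 'last', after running the IPsec, MAC and minimum-TTL
 * policy checks.  The mbuf is always consumed.  Returns non-zero when a
 * policy check rejected the packet.
 */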
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_LOCK_ASSERT(last);

#if defined(IPSEC) || defined(IPSEC_SUPPORT)
	/* check AH/ESP integrity. */
	if (IPSEC_ENABLED(ipv4)) {
		if (IPSEC_CHECK_POLICY(ipv4, n, last) != 0)
			policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Set up generic address and protocol structures for the raw_input routine,
 * then pass them along with the mbuf chain.
 */
int
rip_input(struct mbuf **mp, int *offp, int proto)
{
	struct ifnet *ifp;
	struct mbuf *m = *mp;
	struct ip *ip = mtod(m, struct ip *);
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	*mp = NULL;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK(&V_ripcbinfo);
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (jailed_without_vnet(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			/*
			 * If the incoming datagram is for IGMP, allow it
			 * through unconditionally to the raw socket.
			 *
			 * In the case of IGMPv2, we may not have explicitly
			 * joined the group, and may have set IFF_ALLMULTI
			 * on the interface. imo_multi_filter() may discard
			 * control traffic we actually need to see.
			 *
			 * Userland multicast routing daemons should continue
			 * to filter the control traffic appropriately.
			 */
			int blocked;

			blocked = MCAST_PASS;
			if (proto != IPPROTO_IGMP) {
				struct sockaddr_in group;

				bzero(&group, sizeof(struct sockaddr_in));
				group.sin_len = sizeof(struct sockaddr_in);
				group.sin_family = AF_INET;
				group.sin_addr = ip->ip_dst;

				blocked = imo_multi_filter(inp->inp_moptions,
				    ifp,
				    (struct sockaddr *)&group,
				    (struct sockaddr *)&ripsrc);
			}

			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				continue;
			}
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		if (inetsw[ip_protox[ip->ip_p]].pr_input == rip_input) {
			IPSTAT_INC(ips_noproto);
			IPSTAT_DEC(ips_delivered);
			icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PROTOCOL, 0, 0);
		} else {
			m_freem(m);
		}
	}
	return (IPPROTO_DONE);
}

/*
 * Generate the IP header and pass the packet to ip_output.  Tack on options
 * the user may have set up with a control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, ...)
{
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	va_list ap;
	u_long dst;
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;
	int cnt, hlen;
	u_char opttype, optlen, *cp;

	va_start(ap, so);
	dst = va_arg(ap, u_long);
	va_end(ap);

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = htons(IP_DF);
		else
			ip->ip_off = htons(0);
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = htons(m->m_pkthdr.len);
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst.s_addr = dst;
		if (jailed(inp->inp_cred)) {
			/*
			 * prison_local_ip4() would be good enough but would
			 * let a source of INADDR_ANY pass, which we do not
			 * want to see from jails.
			 */
			if (ip->ip_src.s_addr == INADDR_ANY) {
				error = in_pcbladdr(inp, &ip->ip_dst, &ip->ip_src,
				    inp->inp_cred);
			} else {
				error = prison_local_ip4(inp->inp_cred,
				    &ip->ip_src);
			}
			if (error != 0) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (error);
			}
		}
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		ip = mtod(m, struct ip *);
		hlen = ip->ip_hl << 2;
		if (m->m_len < hlen) {
			m = m_pullup(m, hlen);
			if (m == NULL)
				return (EINVAL);
			ip = mtod(m, struct ip *);
		}

		INP_RLOCK(inp);
		/*
		 * Don't allow both user-supplied options and options set via
		 * setsockopt(), and don't allow packet lengths that will crash.
		 */
		if ((hlen < sizeof (*ip))
		    || ((hlen > sizeof (*ip)) && inp->inp_options)
		    || (ntohs(ip->ip_len) != m->m_pkthdr.len)) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}
		/*
		 * Don't allow IP options which do not have the required
		 * structure as specified in section 3.1 of RFC 791 on
		 * pages 15-23.
		 */
		cp = (u_char *)(ip + 1);
		cnt = hlen - sizeof (struct ip);
		for (; cnt > 0; cnt -= optlen, cp += optlen) {
			opttype = cp[IPOPT_OPTVAL];
			if (opttype == IPOPT_EOL)
				break;
			if (opttype == IPOPT_NOP) {
				optlen = 1;
				continue;
			}
			if (cnt < IPOPT_OLEN + sizeof(u_char)) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(u_char) ||
			    optlen > cnt) {
				INP_RUNLOCK(inp);
				m_freem(m);
				return (EINVAL);
			}
		}
		/*
		 * This doesn't allow an application to specify an ID of zero,
		 * but that limitation has been with us from the beginning.
		 */
		if (ip->ip_id == 0)
			ip_fillid(ip);

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add the necessary
 * access control checks.
 *
 * XXX-BZ inp locking?
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct	inpcb *inp = sotoinpcb(so);
	int	error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
				EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW3:	/* generic ipfw v.3 functions */
		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (V_ip_fw_ctl_ptr != NULL)
				error = V_ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
				ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
					EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	struct rm_priotracker in_ifa_tracker;
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				ifa_ref(&ia->ia_ifa);
				IN_IFADDR_RUNLOCK(&in_ifa_tracker);
				/*
				 * in_scrubprefix() kills the interface route.
				 */
				in_scrubprefix(ia, 0);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				ifa_free(&ia->ia_ifa);
				break;
			}
		}
		if (ia == NULL)		/* If ia matched, already unlocked. */
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		break;

	case PRC_IFUP:
		IN_IFADDR_RLOCK(&in_ifa_tracker);
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
			IN_IFADDR_RUNLOCK(&in_ifa_tracker);
			return;
		}
		ifa_ref(&ia->ia_ifa);
		IN_IFADDR_RUNLOCK(&in_ifa_tracker);
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;

		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);

		ifa_free(&ia->ia_ifa);
		break;
	}
}

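/*
 * Attach a new raw IP socket: check for PRIV_NETINET_RAW, reserve socket
 * buffer space, allocate an inpcb and record the requested protocol and
 * the default TTL.
 */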
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

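/*
 * Detach a raw IP socket: remove it from the hash, shut down any multicast
 * routing or RSVP state it owns, and free the inpcb.
 */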
static void
rip_detach(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

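/*
 * Common helper for abort, close and disconnect: clear the foreign address,
 * move the inpcb back to the wildcard hash bucket and mark the socket as
 * no longer connected.
 */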
static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;

	pcbinfo = inp->inp_pcbinfo;
	INP_INFO_WLOCK(pcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(pcbinfo);
}

static void
rip_abort(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static void
rip_close(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	rip_dodisconnect(so, inp);
}

static int
rip_disconnect(struct socket *so)
{
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	rip_dodisconnect(so, inp);
	return (0);
}

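/*
 * Bind a raw socket to a local address.  The address must pass the jail
 * check and, unless it is INADDR_ANY or INP_BINDANY is set, must be
 * configured on one of the system's interfaces.
 */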
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (TAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

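/*
 * Connect a raw socket: record the foreign address, rehash the inpcb and
 * mark the socket as connected.
 */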
static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

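/*
 * Send a datagram: use the connected foreign address, or the address
 * supplied by the caller, as the destination, then hand the mbuf off to
 * rip_output().
 */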
static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}
#endif /* INET */

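/*
 * Sysctl handler that exports the list of active raw IP inpcbs as an array
 * of xinpcb structures, bracketed by xinpgen records so userland can detect
 * changes made while the list was being generated.
 */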
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		n += imax(n / 8, 10);
		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_ripcbinfo);

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);

	INP_INFO_RLOCK(&V_ripcbinfo);
	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_WLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			in_pcbref(inp);
			inp_list[i++] = inp;
		}
		INP_WUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	INP_INFO_WLOCK(&V_ripcbinfo);
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (!in_pcbrele_rlocked(inp))
			INP_RUNLOCK(inp);
	}
	INP_INFO_WUNLOCK(&V_ripcbinfo);

	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_ripcbinfo);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
    CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

#ifdef INET
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
#endif /* INET */
