1/*
2 * IP multicast forwarding procedures
3 *
4 * Written by David Waitzman, BBN Labs, August 1988.
5 * Modified by Steve Deering, Stanford, February 1989.
6 * Modified by Mark J. Steiglitz, Stanford, May, 1991
7 * Modified by Van Jacobson, LBL, January 1993
8 * Modified by Ajit Thyagarajan, PARC, August 1993
9 * Modified by Bill Fenner, PARC, April 1995
10 *
11 * MROUTING Revision: 3.5
12 * $FreeBSD: head/sys/netinet/ip_mroute.c 111888 2003-03-04 23:19:55Z jlemon $
13 */
14
15#include "opt_mac.h"
16#include "opt_mrouting.h"
17#include "opt_random_ip_id.h"
18
19#include <sys/param.h>
20#include <sys/kernel.h>
21#include <sys/lock.h>
22#include <sys/mac.h>
23#include <sys/malloc.h>
24#include <sys/mbuf.h>
25#include <sys/protosw.h>
26#include <sys/signalvar.h>
27#include <sys/socket.h>
28#include <sys/socketvar.h>
29#include <sys/sockio.h>
30#include <sys/sx.h>
31#include <sys/sysctl.h>
32#include <sys/syslog.h>
33#include <sys/systm.h>
34#include <sys/time.h>
35#include <net/if.h>
36#include <net/netisr.h>
37#include <net/route.h>
38#include <netinet/in.h>
39#include <netinet/igmp.h>
40#include <netinet/in_systm.h>
41#include <netinet/in_var.h>
42#include <netinet/ip.h>
43#include <netinet/ip_encap.h>
44#include <netinet/ip_mroute.h>
45#include <netinet/ip_var.h>
46#include <netinet/udp.h>
47#include <machine/in_cksum.h>
48
49/*
50 * Debugging controls for the RSVP and multicast routing code.
51 * They can only be set with a debugger.
52 */
53static u_int    rsvpdebug;		/* non-zero enables debugging	*/
54
55static u_int	mrtdebug;		/* any set of the flags below	*/
56#define		DEBUG_MFC	0x02
57#define		DEBUG_FORWARD	0x04
58#define		DEBUG_EXPIRE	0x08
59#define		DEBUG_XMIT	0x10
60
61#define M_HASCL(m)	((m)->m_flags & M_EXT)
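/*
 * M_HASCL() is true when an mbuf keeps its data in external (cluster)
 * storage.  Cluster data may be shared, so the code below (e.g.
 * X_ip_mforward(), ip_mdq() and phyint_send()) pulls the IP header up
 * into a private mbuf before modifying it.
 */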
62
63static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast routing tables");
64
65static struct mrtstat	mrtstat;
66SYSCTL_STRUCT(_net_inet_ip, OID_AUTO, mrtstat, CTLFLAG_RW,
67    &mrtstat, mrtstat,
68    "Multicast Routing Statistics (struct mrtstat, netinet/ip_mroute.h)");
69
70static struct mfc	*mfctable[MFCTBLSIZ];
71static u_char		nexpire[MFCTBLSIZ];
72static struct vif	viftable[MAXVIFS];
73
74static struct callout_handle expire_upcalls_ch;
75
76#define		EXPIRE_TIMEOUT	(hz / 4)	/* 4x / second		*/
77#define		UPCALL_EXPIRE	6		/* number of timeouts	*/
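/*
 * Together these give an unserviced upcall entry a lifetime of roughly
 * UPCALL_EXPIRE * EXPIRE_TIMEOUT ticks, i.e. 6 * (hz / 4) or about
 * 1.5 seconds, before expire_upcalls() tears it down.
 */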
78
79/*
80 * Define the token bucket filter structures
81 * tbftable -> each vif has one of these for storing info
82 */
83
84static struct tbf tbftable[MAXVIFS];
85#define		TBF_REPROCESS	(hz / 100)	/* 100x / second */
86
87/*
88 * 'Interfaces' associated with decapsulator (so we can tell
89 * packets that went through it from ones that get reflected
90 * by a broken gateway).  These interfaces are never linked into
91 * the system ifnet list & no routes point to them.  I.e., packets
92 * can't be sent this way.  They only exist as a placeholder for
93 * multicast source verification.
94 */
95static struct ifnet multicast_decap_if[MAXVIFS];
96
97#define ENCAP_TTL 64
98#define ENCAP_PROTO IPPROTO_IPIP	/* 4 */
99
100/* prototype IP hdr for encapsulated packets */
101static struct ip multicast_encap_iphdr = {
102#if BYTE_ORDER == LITTLE_ENDIAN
103	sizeof(struct ip) >> 2, IPVERSION,
104#else
105	IPVERSION, sizeof(struct ip) >> 2,
106#endif
107	0,				/* tos */
108	sizeof(struct ip),		/* total length */
109	0,				/* id */
110	0,				/* frag offset */
111	ENCAP_TTL, ENCAP_PROTO,
112	0,				/* checksum */
113};
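/*
 * The per-packet fields of this prototype header (id, total length and
 * the source/destination addresses) are filled in for each datagram by
 * encap_send(); the outer checksum is presumably left for ip_output()
 * to compute at transmit time.
 */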
114
115/*
116 * Private variables.
117 */
118static vifi_t	   numvifs;
119static const struct encaptab *encap_cookie;
120
121/*
122 * one-back cache used by mroute_encapcheck to locate a tunnel's vif
123 * given a datagram's src ip address.
124 */
125static u_long last_encap_src;
126static struct vif *last_encap_vif;
127
128static u_long	X_ip_mcast_src(int vifi);
129static int	X_ip_mforward(struct ip *ip, struct ifnet *ifp,
130			struct mbuf *m, struct ip_moptions *imo);
131static int	X_ip_mrouter_done(void);
132static int	X_ip_mrouter_get(struct socket *so, struct sockopt *m);
133static int	X_ip_mrouter_set(struct socket *so, struct sockopt *m);
134static int	X_legal_vif_num(int vif);
135static int	X_mrt_ioctl(int cmd, caddr_t data);
136
137static int get_sg_cnt(struct sioc_sg_req *);
138static int get_vif_cnt(struct sioc_vif_req *);
139static int ip_mrouter_init(struct socket *, int);
140static int add_vif(struct vifctl *);
141static int del_vif(vifi_t);
142static int add_mfc(struct mfcctl *);
143static int del_mfc(struct mfcctl *);
144static int socket_send(struct socket *, struct mbuf *, struct sockaddr_in *);
145static int set_assert(int);
146static void expire_upcalls(void *);
147static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t);
148static void phyint_send(struct ip *, struct vif *, struct mbuf *);
149static void encap_send(struct ip *, struct vif *, struct mbuf *);
150static void tbf_control(struct vif *, struct mbuf *, struct ip *, u_long);
151static void tbf_queue(struct vif *, struct mbuf *);
152static void tbf_process_q(struct vif *);
153static void tbf_reprocess_q(void *);
154static int tbf_dq_sel(struct vif *, struct ip *);
155static void tbf_send_packet(struct vif *, struct mbuf *);
156static void tbf_update_tokens(struct vif *);
157static int priority(struct vif *, struct ip *);
158
159/*
160 * whether or not special PIM assert processing is enabled.
161 */
162static int pim_assert;
163/*
164 * Rate limit for assert notification messages, in usec
165 */
166#define ASSERT_MSG_TIME		3000000
167
168/*
169 * Hash function for a source, group entry
170 */
171#define MFCHASH(a, g) MFCHASHMOD(((a) >> 20) ^ ((a) >> 10) ^ (a) ^ \
172			((g) >> 20) ^ ((g) >> 10) ^ (g))
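/*
 * The hash simply XORs together high, middle and low bits of the source
 * and group addresses; MFCHASHMOD() then reduces the folded value to an
 * index into mfctable[].
 */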
173
174/*
175 * Find a route for a given origin IP address and Multicast group address
176 * Type of service parameter to be added in the future!!!
177 * Statistics are updated by the caller if needed
178 * (mrtstat.mrts_mfc_lookups and mrtstat.mrts_mfc_misses)
179 */
180static struct mfc *
181mfc_find(in_addr_t o, in_addr_t g)
182{
183    struct mfc *rt;
184
185    for (rt = mfctable[MFCHASH(o,g)]; rt; rt = rt->mfc_next)
186	if ((rt->mfc_origin.s_addr == o) &&
187		(rt->mfc_mcastgrp.s_addr == g) && (rt->mfc_stall == NULL))
188	    break;
189    return rt;
190}
191
192/*
193 * Macros to compute elapsed time efficiently
194 * Borrowed from Van Jacobson's scheduling code
195 */
196#define TV_DELTA(a, b, delta) {					\
197	int xxs;						\
198	delta = (a).tv_usec - (b).tv_usec;			\
199	if ((xxs = (a).tv_sec - (b).tv_sec)) {			\
200		switch (xxs) {					\
201		case 2:						\
202		      delta += 1000000;				\
203		      /* FALLTHROUGH */				\
204		case 1:						\
205		      delta += 1000000;				\
206		      break;					\
207		default:					\
208		      delta += (1000000 * xxs);			\
209		}						\
210	}							\
211}
212
213#define TV_LT(a, b) (((a).tv_usec < (b).tv_usec && \
214	      (a).tv_sec <= (b).tv_sec) || (a).tv_sec < (b).tv_sec)
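/*
 * TV_DELTA() leaves the difference (a - b) in microseconds in "delta";
 * the switch avoids a multiply in the common case of the timestamps
 * differing by only one or two whole seconds.  TV_LT() is true when
 * timestamp "a" is strictly earlier than "b".
 */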
215
216/*
217 * Handle MRT setsockopt commands to modify the multicast routing tables.
218 */
219static int
220X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
221{
222    int	error, optval;
223    vifi_t	vifi;
224    struct	vifctl vifc;
225    struct	mfcctl mfc;
226
227    if (so != ip_mrouter && sopt->sopt_name != MRT_INIT)
228	return EPERM;
229
230    error = 0;
231    switch (sopt->sopt_name) {
232    case MRT_INIT:
233	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
234	if (error)
235	    break;
236	error = ip_mrouter_init(so, optval);
237	break;
238
239    case MRT_DONE:
240	error = ip_mrouter_done();
241	break;
242
243    case MRT_ADD_VIF:
244	error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
245	if (error)
246	    break;
247	error = add_vif(&vifc);
248	break;
249
250    case MRT_DEL_VIF:
251	error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
252	if (error)
253	    break;
254	error = del_vif(vifi);
255	break;
256
257    case MRT_ADD_MFC:
258    case MRT_DEL_MFC:
259	error = sooptcopyin(sopt, &mfc, sizeof mfc, sizeof mfc);
260	if (error)
261	    break;
262	if (sopt->sopt_name == MRT_ADD_MFC)
263	    error = add_mfc(&mfc);
264	else
265	    error = del_mfc(&mfc);
266	break;
267
268    case MRT_ASSERT:
269	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
270	if (error)
271	    break;
272	set_assert(optval);
273	break;
274
275    default:
276	error = EOPNOTSUPP;
277	break;
278    }
279    return error;
280}
281
282/*
283 * Handle MRT getsockopt commands
284 */
285static int
286X_ip_mrouter_get(struct socket *so, struct sockopt *sopt)
287{
288    int error;
289    static int version = 0x0305; /* !!! why is this here? XXX */
290
291    switch (sopt->sopt_name) {
292    case MRT_VERSION:
293	error = sooptcopyout(sopt, &version, sizeof version);
294	break;
295
296    case MRT_ASSERT:
297	error = sooptcopyout(sopt, &pim_assert, sizeof pim_assert);
298	break;
299
300    default:
301	error = EOPNOTSUPP;
302	break;
303    }
304    return error;
305}
306
307/*
308 * Handle ioctl commands to obtain information from the cache
309 */
310static int
311X_mrt_ioctl(int cmd, caddr_t data)
312{
313    int error = 0;
314
315    switch (cmd) {
316    case (SIOCGETVIFCNT):
317	error = get_vif_cnt((struct sioc_vif_req *)data);
318	break;
319
320    case (SIOCGETSGCNT):
321	error = get_sg_cnt((struct sioc_sg_req *)data);
322	break;
323
324    default:
325	error = EINVAL;
326	break;
327    }
328    return error;
329}
330
331/*
332 * returns the packet, byte, and rpf-failure counts for the source/group provided
333 */
334static int
335get_sg_cnt(struct sioc_sg_req *req)
336{
337    int s;
338    struct mfc *rt;
339
340    s = splnet();
341    rt = mfc_find(req->src.s_addr, req->grp.s_addr);
342    splx(s);
343    if (rt == NULL) {
344	req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
345	return EADDRNOTAVAIL;
346    }
347    req->pktcnt = rt->mfc_pkt_cnt;
348    req->bytecnt = rt->mfc_byte_cnt;
349    req->wrong_if = rt->mfc_wrong_if;
350    return 0;
351}
352
353/*
354 * returns the input and output packet and byte counts on the vif provided
355 */
356static int
357get_vif_cnt(struct sioc_vif_req *req)
358{
359    vifi_t vifi = req->vifi;
360
361    if (vifi >= numvifs)
362	return EINVAL;
363
364    req->icount = viftable[vifi].v_pkt_in;
365    req->ocount = viftable[vifi].v_pkt_out;
366    req->ibytes = viftable[vifi].v_bytes_in;
367    req->obytes = viftable[vifi].v_bytes_out;
368
369    return 0;
370}
371
372/*
373 * Enable multicast routing
374 */
375static int
376ip_mrouter_init(struct socket *so, int version)
377{
378    if (mrtdebug)
379	log(LOG_DEBUG, "ip_mrouter_init: so_type = %d, pr_protocol = %d\n",
380	    so->so_type, so->so_proto->pr_protocol);
381
382    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
383	return EOPNOTSUPP;
384
385    if (version != 1)
386	return ENOPROTOOPT;
387
388    if (ip_mrouter != NULL)
389	return EADDRINUSE;
390
391    ip_mrouter = so;
392
393    bzero((caddr_t)mfctable, sizeof(mfctable));
394    bzero((caddr_t)nexpire, sizeof(nexpire));
395
396    pim_assert = 0;
397
398    expire_upcalls_ch = timeout(expire_upcalls, NULL, EXPIRE_TIMEOUT);
399
400    if (mrtdebug)
401	log(LOG_DEBUG, "ip_mrouter_init\n");
402
403    return 0;
404}
405
406/*
407 * Disable multicast routing
408 */
409static int
410X_ip_mrouter_done(void)
411{
412    vifi_t vifi;
413    int i;
414    struct ifnet *ifp;
415    struct ifreq ifr;
416    struct mfc *rt;
417    struct rtdetq *rte;
418    int s;
419
420    s = splnet();
421
422    /*
423     * For each phyint in use, disable promiscuous reception of all IP
424     * multicasts.
425     */
426    for (vifi = 0; vifi < numvifs; vifi++) {
427	if (viftable[vifi].v_lcl_addr.s_addr != 0 &&
428		!(viftable[vifi].v_flags & VIFF_TUNNEL)) {
429	    struct sockaddr_in *so = (struct sockaddr_in *)&(ifr.ifr_addr);
430
431	    so->sin_len = sizeof(struct sockaddr_in);
432	    so->sin_family = AF_INET;
433	    so->sin_addr.s_addr = INADDR_ANY;
434	    ifp = viftable[vifi].v_ifp;
435	    if_allmulti(ifp, 0);
436	}
437    }
438    bzero((caddr_t)tbftable, sizeof(tbftable));
439    bzero((caddr_t)viftable, sizeof(viftable));
440    numvifs = 0;
441    pim_assert = 0;
442
443    untimeout(expire_upcalls, NULL, expire_upcalls_ch);
444
445    /*
446     * Free all multicast forwarding cache entries.
447     */
448    for (i = 0; i < MFCTBLSIZ; i++) {
449	for (rt = mfctable[i]; rt != NULL; ) {
450	    struct mfc *nr = rt->mfc_next;
451
452	    for (rte = rt->mfc_stall; rte != NULL; ) {
453		struct rtdetq *n = rte->next;
454
455		m_freem(rte->m);
456		free(rte, M_MRTABLE);
457		rte = n;
458	    }
459	    free(rt, M_MRTABLE);
460	    rt = nr;
461	}
462    }
463
464    bzero((caddr_t)mfctable, sizeof(mfctable));
465
466    /*
467     * Reset de-encapsulation cache
468     */
469    last_encap_src = INADDR_ANY;
470    last_encap_vif = NULL;
471    if (encap_cookie) {
472	encap_detach(encap_cookie);
473	encap_cookie = NULL;
474    }
475
476    ip_mrouter = NULL;
477
478    splx(s);
479
480    if (mrtdebug)
481	log(LOG_DEBUG, "ip_mrouter_done\n");
482
483    return 0;
484}
485
486/*
487 * Set PIM assert processing global
488 */
489static int
490set_assert(int i)
491{
492    if ((i != 1) && (i != 0))
493	return EINVAL;
494
495    pim_assert = i;
496
497    return 0;
498}
499
500/*
501 * Decide if a packet is from a tunnelled peer.
502 * Return 0 if not, 64 if so.  XXX yuck.. 64 ???
503 */
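/*
 * Note: the ip_encap framework treats the value returned by a check
 * routine as a degree-of-match used to choose among competing claimers;
 * 64 here appears to correspond to a full match on both the outer
 * source and destination addresses (32 bits each).
 */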
504static int
505mroute_encapcheck(const struct mbuf *m, int off, int proto, void *arg)
506{
507    struct ip *ip = mtod(m, struct ip *);
508    int hlen = ip->ip_hl << 2;
509
510    /*
511     * don't claim the packet if it's not to a multicast destination or if
512     * we don't have an encapsulating tunnel with the source.
513     * Note:  This code assumes that the remote site IP address
514     * uniquely identifies the tunnel (i.e., that this site has
515     * at most one tunnel with the remote site).
516     */
517    if (!IN_MULTICAST(ntohl(((struct ip *)((char *)ip+hlen))->ip_dst.s_addr)))
518	return 0;
519    if (ip->ip_src.s_addr != last_encap_src) {
520	struct vif *vifp = viftable;
521	struct vif *vife = vifp + numvifs;
522
523	last_encap_src = ip->ip_src.s_addr;
524	last_encap_vif = NULL;
525	for ( ; vifp < vife; ++vifp)
526	    if (vifp->v_rmt_addr.s_addr == ip->ip_src.s_addr) {
527		if ((vifp->v_flags & (VIFF_TUNNEL|VIFF_SRCRT)) == VIFF_TUNNEL)
528		    last_encap_vif = vifp;
529		break;
530	    }
531    }
532    if (last_encap_vif == NULL) {
533	last_encap_src = INADDR_ANY;
534	return 0;
535    }
536    return 64;
537}
538
539/*
540 * De-encapsulate a packet and feed it back through ip input (this
541 * routine is called whenever IP gets a packet that mroute_encap_func()
542 * claimed).
543 */
544static void
545mroute_encap_input(struct mbuf *m, int off)
546{
547    struct ip *ip = mtod(m, struct ip *);
548    int hlen = ip->ip_hl << 2;
549
550    if (hlen > sizeof(struct ip))
551	ip_stripoptions(m, (struct mbuf *) 0);
552    m->m_data += sizeof(struct ip);
553    m->m_len -= sizeof(struct ip);
554    m->m_pkthdr.len -= sizeof(struct ip);
555
556    m->m_pkthdr.rcvif = last_encap_vif->v_ifp;
557
558    netisr_queue(NETISR_IP, m);
559    /*
560     * normally we would need a "schednetisr(NETISR_IP)"
561     * here but we were called by ip_input and it is going
562     * to loop back & try to dequeue the packet we just
563     * queued as soon as we return so we avoid the
564     * unnecessary software interrupt.
565     *
566     * XXX
567     * This no longer holds - we may have direct-dispatched the packet,
568     * or there may be a queue processing limit.
569     */
570}
571
572extern struct domain inetdomain;
573static struct protosw mroute_encap_protosw =
574{ SOCK_RAW,	&inetdomain,	IPPROTO_IPV4,	PR_ATOMIC|PR_ADDR,
575  mroute_encap_input,	0,	0,		rip_ctloutput,
576  0,
577  0,		0,		0,		0,
578  &rip_usrreqs
579};
580
581/*
582 * Add a vif to the vif table
583 */
584static int
585add_vif(struct vifctl *vifcp)
586{
587    struct vif *vifp = viftable + vifcp->vifc_vifi;
588    struct sockaddr_in sin = {sizeof sin, AF_INET};
589    struct ifaddr *ifa;
590    struct ifnet *ifp;
591    int error, s;
592    struct tbf *v_tbf = tbftable + vifcp->vifc_vifi;
593
594    if (vifcp->vifc_vifi >= MAXVIFS)
595	return EINVAL;
596    if (vifp->v_lcl_addr.s_addr != INADDR_ANY)
597	return EADDRINUSE;
598    if (vifcp->vifc_lcl_addr.s_addr == INADDR_ANY)
599	return EADDRNOTAVAIL;
600
601    /* Find the interface with an address in AF_INET family */
602    sin.sin_addr = vifcp->vifc_lcl_addr;
603    ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
604    if (ifa == NULL)
605	return EADDRNOTAVAIL;
606    ifp = ifa->ifa_ifp;
607
608    if (vifcp->vifc_flags & VIFF_TUNNEL) {
609	if ((vifcp->vifc_flags & VIFF_SRCRT) == 0) {
610	    /*
611	     * An encapsulating tunnel is wanted.  Tell
612	     * mroute_encap_input() to start paying attention
613	     * to encapsulated packets.
614	     */
615	    if (encap_cookie == NULL) {
616		encap_cookie = encap_attach_func(AF_INET, IPPROTO_IPV4,
617				mroute_encapcheck,
618				(struct protosw *)&mroute_encap_protosw, NULL);
619
620		if (encap_cookie == NULL) {
621		    printf("ip_mroute: unable to attach encap\n");
622		    return EIO;	/* XXX */
623		}
624		for (s = 0; s < MAXVIFS; ++s) {
625		    multicast_decap_if[s].if_name = "mdecap";
626		    multicast_decap_if[s].if_unit = s;
627		}
628	    }
629	    /*
630	     * Set interface to fake encapsulator interface
631	     */
632	    ifp = &multicast_decap_if[vifcp->vifc_vifi];
633	    /*
634	     * Prepare cached route entry
635	     */
636	    bzero(&vifp->v_route, sizeof(vifp->v_route));
637	} else {
638	    log(LOG_ERR, "source routed tunnels not supported\n");
639	    return EOPNOTSUPP;
640	}
641    } else {		/* Make sure the interface supports multicast */
642	if ((ifp->if_flags & IFF_MULTICAST) == 0)
643	    return EOPNOTSUPP;
644
645	/* Enable promiscuous reception of all IP multicasts from the if */
646	s = splnet();
647	error = if_allmulti(ifp, 1);
648	splx(s);
649	if (error)
650	    return error;
651    }
652
653    s = splnet();
654    /* define parameters for the tbf structure */
655    vifp->v_tbf = v_tbf;
656    GET_TIME(vifp->v_tbf->tbf_last_pkt_t);
657    vifp->v_tbf->tbf_n_tok = 0;
658    vifp->v_tbf->tbf_q_len = 0;
659    vifp->v_tbf->tbf_max_q_len = MAXQSIZE;
660    vifp->v_tbf->tbf_q = vifp->v_tbf->tbf_t = NULL;
661
662    vifp->v_flags     = vifcp->vifc_flags;
663    vifp->v_threshold = vifcp->vifc_threshold;
664    vifp->v_lcl_addr  = vifcp->vifc_lcl_addr;
665    vifp->v_rmt_addr  = vifcp->vifc_rmt_addr;
666    vifp->v_ifp       = ifp;
667    /* scaling up here allows division by 1024 in critical code */
668    vifp->v_rate_limit= vifcp->vifc_rate_limit * 1024 / 1000;
669    vifp->v_rsvp_on   = 0;
670    vifp->v_rsvpd     = NULL;
671    /* initialize per vif pkt counters */
672    vifp->v_pkt_in    = 0;
673    vifp->v_pkt_out   = 0;
674    vifp->v_bytes_in  = 0;
675    vifp->v_bytes_out = 0;
676    splx(s);
677
678    /* Adjust numvifs up if the vifi is higher than numvifs */
679    if (numvifs <= vifcp->vifc_vifi) numvifs = vifcp->vifc_vifi + 1;
680
681    if (mrtdebug)
682	log(LOG_DEBUG, "add_vif #%d, lcladdr %lx, %s %lx, thresh %x, rate %d\n",
683	    vifcp->vifc_vifi,
684	    (u_long)ntohl(vifcp->vifc_lcl_addr.s_addr),
685	    (vifcp->vifc_flags & VIFF_TUNNEL) ? "rmtaddr" : "mask",
686	    (u_long)ntohl(vifcp->vifc_rmt_addr.s_addr),
687	    vifcp->vifc_threshold,
688	    vifcp->vifc_rate_limit);
689
690    return 0;
691}
692
693/*
694 * Delete a vif from the vif table
695 */
696static int
697del_vif(vifi_t vifi)
698{
699    struct vif *vifp;
700    int s;
701
702    if (vifi >= numvifs)
703	return EINVAL;
704    vifp = &viftable[vifi];
705    if (vifp->v_lcl_addr.s_addr == INADDR_ANY)
706	return EADDRNOTAVAIL;
707
708    s = splnet();
709
710    if (!(vifp->v_flags & VIFF_TUNNEL))
711	if_allmulti(vifp->v_ifp, 0);
712
713    if (vifp == last_encap_vif) {
714	last_encap_vif = NULL;
715	last_encap_src = INADDR_ANY;
716    }
717
718    /*
719     * Free packets queued at the interface
720     */
721    while (vifp->v_tbf->tbf_q) {
722	struct mbuf *m = vifp->v_tbf->tbf_q;
723
724	vifp->v_tbf->tbf_q = m->m_act;
725	m_freem(m);
726    }
727
728    bzero((caddr_t)vifp->v_tbf, sizeof(*(vifp->v_tbf)));
729    bzero((caddr_t)vifp, sizeof (*vifp));
730
731    if (mrtdebug)
732	log(LOG_DEBUG, "del_vif %d, numvifs %d\n", vifi, numvifs);
733
734    /* Adjust numvifs down */
735    for (vifi = numvifs; vifi > 0; vifi--)
736	if (viftable[vifi-1].v_lcl_addr.s_addr != INADDR_ANY)
737	    break;
738    numvifs = vifi;
739
740    splx(s);
741
742    return 0;
743}
744
745/*
746 * update an mfc entry without resetting counters and S,G addresses.
747 */
748static void
749update_mfc_params(struct mfc *rt, struct mfcctl *mfccp)
750{
751    int i;
752
753    rt->mfc_parent = mfccp->mfcc_parent;
754    for (i = 0; i < numvifs; i++)
755	rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
756}
757
758/*
759 * fully initialize an mfc entry from the parameter.
760 */
761static void
762init_mfc_params(struct mfc *rt, struct mfcctl *mfccp)
763{
764    rt->mfc_origin     = mfccp->mfcc_origin;
765    rt->mfc_mcastgrp   = mfccp->mfcc_mcastgrp;
766
767    update_mfc_params(rt, mfccp);
768
769    /* initialize pkt counters per src-grp */
770    rt->mfc_pkt_cnt    = 0;
771    rt->mfc_byte_cnt   = 0;
772    rt->mfc_wrong_if   = 0;
773    rt->mfc_last_assert.tv_sec = rt->mfc_last_assert.tv_usec = 0;
774}
775
776
777/*
778 * Add an mfc entry
779 */
780static int
781add_mfc(struct mfcctl *mfccp)
782{
783    struct mfc *rt;
784    u_long hash;
785    struct rtdetq *rte;
786    u_short nstl;
787    int s;
788
789    rt = mfc_find(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);
790
791    /* If an entry already exists, just update the fields */
792    if (rt) {
793	if (mrtdebug & DEBUG_MFC)
794	    log(LOG_DEBUG,"add_mfc update o %lx g %lx p %x\n",
795		(u_long)ntohl(mfccp->mfcc_origin.s_addr),
796		(u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
797		mfccp->mfcc_parent);
798
799	s = splnet();
800	update_mfc_params(rt, mfccp);
801	splx(s);
802	return 0;
803    }
804
805    /*
806     * Find the entry for which the upcall was made and update it
807     */
808    s = splnet();
809    hash = MFCHASH(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);
810    for (rt = mfctable[hash], nstl = 0; rt; rt = rt->mfc_next) {
811
812	if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
813		(rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr) &&
814		(rt->mfc_stall != NULL)) {
815
816	    if (nstl++)
817		log(LOG_ERR, "add_mfc %s o %lx g %lx p %x dbx %p\n",
818		    "multiple kernel entries",
819		    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
820		    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
821		    mfccp->mfcc_parent, (void *)rt->mfc_stall);
822
823	    if (mrtdebug & DEBUG_MFC)
824		log(LOG_DEBUG,"add_mfc o %lx g %lx p %x dbg %p\n",
825		    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
826		    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
827		    mfccp->mfcc_parent, (void *)rt->mfc_stall);
828
829	    init_mfc_params(rt, mfccp);
830
831	    rt->mfc_expire = 0;	/* Don't clean this guy up */
832	    nexpire[hash]--;
833
834	    /* forward and free the packets queued while awaiting this upcall */
835	    for (rte = rt->mfc_stall; rte != NULL; ) {
836		struct rtdetq *n = rte->next;
837
838		ip_mdq(rte->m, rte->ifp, rt, -1);
839		m_freem(rte->m);
840		free(rte, M_MRTABLE);
841		rte = n;
842	    }
843	    rt->mfc_stall = NULL;
844	}
845    }
846
847    /*
848     * It is possible that an entry is being inserted without an upcall
849     */
850    if (nstl == 0) {
851	if (mrtdebug & DEBUG_MFC)
852	    log(LOG_DEBUG,"add_mfc no upcall h %lu o %lx g %lx p %x\n",
853		hash, (u_long)ntohl(mfccp->mfcc_origin.s_addr),
854		(u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
855		mfccp->mfcc_parent);
856
857	for (rt = mfctable[hash]; rt != NULL; rt = rt->mfc_next) {
858	    if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
859		    (rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr)) {
860		init_mfc_params(rt, mfccp);
861		if (rt->mfc_expire)
862		    nexpire[hash]--;
863		rt->mfc_expire = 0;
864		break; /* XXX */
865	    }
866	}
867	if (rt == NULL) {		/* no upcall, so make a new entry */
868	    rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
869	    if (rt == NULL) {
870		splx(s);
871		return ENOBUFS;
872	    }
873
874	    init_mfc_params(rt, mfccp);
875	    rt->mfc_expire     = 0;
876	    rt->mfc_stall      = NULL;
877
878	    /* insert new entry at head of hash chain */
879	    rt->mfc_next = mfctable[hash];
880	    mfctable[hash] = rt;
881	}
882    }
883    splx(s);
884    return 0;
885}
886
887/*
888 * Delete an mfc entry
889 */
890static int
891del_mfc(struct mfcctl *mfccp)
892{
893    struct in_addr 	origin;
894    struct in_addr 	mcastgrp;
895    struct mfc 		*rt;
896    struct mfc	 	**nptr;
897    u_long 		hash;
898    int s;
899
900    origin = mfccp->mfcc_origin;
901    mcastgrp = mfccp->mfcc_mcastgrp;
902
903    if (mrtdebug & DEBUG_MFC)
904	log(LOG_DEBUG,"del_mfc orig %lx mcastgrp %lx\n",
905	    (u_long)ntohl(origin.s_addr), (u_long)ntohl(mcastgrp.s_addr));
906
907    s = splnet();
908
909    hash = MFCHASH(origin.s_addr, mcastgrp.s_addr);
910    for (nptr = &mfctable[hash]; (rt = *nptr) != NULL; nptr = &rt->mfc_next)
911	if (origin.s_addr == rt->mfc_origin.s_addr &&
912		mcastgrp.s_addr == rt->mfc_mcastgrp.s_addr &&
913		rt->mfc_stall == NULL)
914	    break;
915    if (rt == NULL) {
916	splx(s);
917	return EADDRNOTAVAIL;
918    }
919
920    *nptr = rt->mfc_next;
921    free(rt, M_MRTABLE);
922
923    splx(s);
924
925    return 0;
926}
927
928/*
929 * Send a message to mrouted on the multicast routing socket
930 */
931static int
932socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
933{
934    if (s) {
935	if (sbappendaddr(&s->so_rcv, (struct sockaddr *)src, mm, NULL) != 0) {
936	    sorwakeup(s);
937	    return 0;
938	}
939    }
940    m_freem(mm);
941    return -1;
942}
943
944/*
945 * IP multicast forwarding function. This function assumes that the packet
946 * pointed to by "ip" has arrived on (or is about to be sent to) the interface
947 * pointed to by "ifp", and the packet is to be relayed to other networks
948 * that have members of the packet's destination IP multicast group.
949 *
950 * The packet is returned unscathed to the caller, unless it is
951 * erroneous, in which case a non-zero return value tells the caller to
952 * discard it.
953 */
954
955#define TUNNEL_LEN  12  /* # bytes of IP option for tunnel encapsulation  */
956
957static int
958X_ip_mforward(struct ip *ip, struct ifnet *ifp,
959	struct mbuf *m, struct ip_moptions *imo)
960{
961    struct mfc *rt;
962    int s;
963    vifi_t vifi;
964
965    if (mrtdebug & DEBUG_FORWARD)
966	log(LOG_DEBUG, "ip_mforward: src %lx, dst %lx, ifp %p\n",
967	    (u_long)ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr),
968	    (void *)ifp);
969
970    if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
971		((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
972	/*
973	 * Packet arrived via a physical interface or
974	 * an encapsulated tunnel.
975	 */
976    } else {
977	/*
978	 * Packet arrived through a source-route tunnel.
979	 * Source-route tunnels are no longer supported.
980	 */
981	static int last_log;
982	if (last_log != time_second) {
983	    last_log = time_second;
984	    log(LOG_ERR,
985		"ip_mforward: received source-routed packet from %lx\n",
986		(u_long)ntohl(ip->ip_src.s_addr));
987	}
988	return 1;
989    }
990
991    if ((imo) && ((vifi = imo->imo_multicast_vif) < numvifs)) {
992	if (ip->ip_ttl < 255)
993	    ip->ip_ttl++;	/* compensate for -1 in *_send routines */
994	if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
995	    struct vif *vifp = viftable + vifi;
996
997	    printf("Sending IPPROTO_RSVP from %lx to %lx on vif %d (%s%s%d)\n",
998		(long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr),
999		vifi,
1000		(vifp->v_flags & VIFF_TUNNEL) ? "tunnel on " : "",
1001		vifp->v_ifp->if_name, vifp->v_ifp->if_unit);
1002	}
1003	return ip_mdq(m, ifp, NULL, vifi);
1004    }
1005    if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
1006	printf("Warning: IPPROTO_RSVP from %lx to %lx without vif option\n",
1007	    (long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr));
1008	if (!imo)
1009	    printf("In fact, no options were specified at all\n");
1010    }
1011
1012    /*
1013     * Don't forward a packet with time-to-live of zero or one,
1014     * or a packet destined to a local-only group.
1015     */
1016    if (ip->ip_ttl <= 1 || ntohl(ip->ip_dst.s_addr) <= INADDR_MAX_LOCAL_GROUP)
1017	return 0;
1018
1019    /*
1020     * Determine forwarding vifs from the forwarding cache table
1021     */
1022    s = splnet();
1023    ++mrtstat.mrts_mfc_lookups;
1024    rt = mfc_find(ip->ip_src.s_addr, ip->ip_dst.s_addr);
1025
1026    /* Entry exists, so forward if necessary */
1027    if (rt != NULL) {
1028	splx(s);
1029	return ip_mdq(m, ifp, rt, -1);
1030    } else {
1031	/*
1032	 * If we don't have a route for the packet's origin,
1033	 * make a copy of the packet and send a message to the routing daemon
1034	 */
1035
1036	struct mbuf *mb0;
1037	struct rtdetq *rte;
1038	u_long hash;
1039	int hlen = ip->ip_hl << 2;
1040
1041	++mrtstat.mrts_mfc_misses;
1042
1043	mrtstat.mrts_no_route++;
1044	if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC))
1045	    log(LOG_DEBUG, "ip_mforward: no rte s %lx g %lx\n",
1046		(u_long)ntohl(ip->ip_src.s_addr),
1047		(u_long)ntohl(ip->ip_dst.s_addr));
1048
1049	/*
1050	 * Allocate mbufs early so that we don't do extra work if we are
1051	 * just going to fail anyway.  Make sure to pullup the header so
1052	 * that other people can't step on it.
1053	 */
1054	rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE, M_NOWAIT);
1055	if (rte == NULL) {
1056	    splx(s);
1057	    return ENOBUFS;
1058	}
1059	mb0 = m_copy(m, 0, M_COPYALL);
1060	if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen))
1061	    mb0 = m_pullup(mb0, hlen);
1062	if (mb0 == NULL) {
1063	    free(rte, M_MRTABLE);
1064	    splx(s);
1065	    return ENOBUFS;
1066	}
1067
1068	/* is there an upcall waiting for this flow ? */
1069	hash = MFCHASH(ip->ip_src.s_addr, ip->ip_dst.s_addr);
1070	for (rt = mfctable[hash]; rt; rt = rt->mfc_next) {
1071	    if ((ip->ip_src.s_addr == rt->mfc_origin.s_addr) &&
1072		    (ip->ip_dst.s_addr == rt->mfc_mcastgrp.s_addr) &&
1073		    (rt->mfc_stall != NULL))
1074		break;
1075	}
1076
1077	if (rt == NULL) {
1078	    int i;
1079	    struct igmpmsg *im;
1080	    struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
1081	    struct mbuf *mm;
1082
1083	    /*
1084	     * Locate the vifi for the incoming interface for this packet.
1085	     * If none found, drop packet.
1086	     */
1087	    for (vifi=0; vifi<numvifs && viftable[vifi].v_ifp != ifp; vifi++)
1088		;
1089            if (vifi >= numvifs)	/* vif not found, drop packet */
1090		goto non_fatal;
1091
1092	    /* no upcall, so make a new entry */
1093	    rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
1094	    if (rt == NULL)
1095		goto fail;
1096	    /* Make a copy of the header to send to the user level process */
1097	    mm = m_copy(mb0, 0, hlen);
1098	    if (mm == NULL)
1099		goto fail1;
1100
1101	    /*
1102	     * Send message to routing daemon to install
1103	     * a route into the kernel table
1104	     */
1105
1106	    im = mtod(mm, struct igmpmsg *);
1107	    im->im_msgtype = IGMPMSG_NOCACHE;
1108	    im->im_mbz = 0;
1109	    im->im_vif = vifi;
1110
1111	    mrtstat.mrts_upcalls++;
1112
1113	    k_igmpsrc.sin_addr = ip->ip_src;
1114	    if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
1115		log(LOG_WARNING, "ip_mforward: ip_mrouter socket queue full\n");
1116		++mrtstat.mrts_upq_sockfull;
1117fail1:
1118		free(rt, M_MRTABLE);
1119fail:
1120		free(rte, M_MRTABLE);
1121		m_freem(mb0);
1122		splx(s);
1123		return ENOBUFS;
1124	    }
1125
1126	    /* insert new entry at head of hash chain */
1127	    rt->mfc_origin.s_addr     = ip->ip_src.s_addr;
1128	    rt->mfc_mcastgrp.s_addr   = ip->ip_dst.s_addr;
1129	    rt->mfc_expire	      = UPCALL_EXPIRE;
1130	    nexpire[hash]++;
1131	    for (i = 0; i < numvifs; i++)
1132		rt->mfc_ttls[i] = 0;
1133	    rt->mfc_parent = -1;
1134
1135	    /* link into table */
1136	    rt->mfc_next   = mfctable[hash];
1137	    mfctable[hash] = rt;
1138	    rt->mfc_stall = rte;
1139
1140	} else {
1141	    /* determine if q has overflowed */
1142	    int npkts = 0;
1143	    struct rtdetq **p;
1144
1145	    /*
1146	     * XXX ouch! we need to append to the list, but we
1147	     * only have a pointer to the front, so we have to
1148	     * scan the entire list every time.
1149	     */
1150	    for (p = &rt->mfc_stall; *p != NULL; p = &(*p)->next)
1151		npkts++;
1152
1153	    if (npkts > MAX_UPQ) {
1154		mrtstat.mrts_upq_ovflw++;
1155non_fatal:
1156		free(rte, M_MRTABLE);
1157		m_freem(mb0);
1158		splx(s);
1159		return 0;
1160	    }
1161
1162	    /* Add this entry to the end of the queue */
1163	    *p = rte;
1164	}
1165
1166	rte->m 			= mb0;
1167	rte->ifp 		= ifp;
1168	rte->next		= NULL;
1169
1170	splx(s);
1171
1172	return 0;
1173    }
1174}
1175
1176/*
1177 * Clean up the cache entry if upcall is not serviced
1178 */
1179static void
1180expire_upcalls(void *unused)
1181{
1182    struct rtdetq *rte;
1183    struct mfc *mfc, **nptr;
1184    int i;
1185    int s;
1186
1187    s = splnet();
1188    for (i = 0; i < MFCTBLSIZ; i++) {
1189	if (nexpire[i] == 0)
1190	    continue;
1191	nptr = &mfctable[i];
1192	for (mfc = *nptr; mfc != NULL; mfc = *nptr) {
1193	    /*
1194	     * Only pending upcall entries (mfc_stall != NULL) can expire.
1195	     * Skip entries marked never to expire (that shouldn't happen)
1196	     * and tear the rest down once their countdown reaches zero.
1197	     */
1198	    if (mfc->mfc_stall != NULL && mfc->mfc_expire != 0 &&
1199		    --mfc->mfc_expire == 0) {
1200		if (mrtdebug & DEBUG_EXPIRE)
1201		    log(LOG_DEBUG, "expire_upcalls: expiring (%lx %lx)\n",
1202			(u_long)ntohl(mfc->mfc_origin.s_addr),
1203			(u_long)ntohl(mfc->mfc_mcastgrp.s_addr));
1204		/*
1205		 * drop all the queued packets, freeing each mbuf along
1206		 * with its stored packet, incoming interface, and timing info
1207		 */
1208		for (rte = mfc->mfc_stall; rte; ) {
1209		    struct rtdetq *n = rte->next;
1210
1211		    m_freem(rte->m);
1212		    free(rte, M_MRTABLE);
1213		    rte = n;
1214		}
1215		++mrtstat.mrts_cache_cleanups;
1216		nexpire[i]--;
1217
1218		*nptr = mfc->mfc_next;
1219		free(mfc, M_MRTABLE);
1220	    } else {
1221		nptr = &mfc->mfc_next;
1222	    }
1223	}
1224    }
1225    splx(s);
1226    expire_upcalls_ch = timeout(expire_upcalls, NULL, EXPIRE_TIMEOUT);
1227}
1228
1229/*
1230 * Packet forwarding routine once entry in the cache is made
1231 */
1232static int
1233ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
1234{
1235    struct ip  *ip = mtod(m, struct ip *);
1236    vifi_t vifi;
1237    int plen = ip->ip_len;
1238
1239/*
1240 * Macro to send packet on vif.  Since RSVP packets don't get counted on
1241 * input, they shouldn't get counted on output, so statistics keeping is
1242 * separate.
1243 */
1244#define MC_SEND(ip,vifp,m) {                             \
1245                if ((vifp)->v_flags & VIFF_TUNNEL)  	 \
1246                    encap_send((ip), (vifp), (m));       \
1247                else                                     \
1248                    phyint_send((ip), (vifp), (m));      \
1249}
1250
1251    /*
1252     * If xmt_vif is not -1, send on only the requested vif.
1253     *
1254     * (since vifi_t is u_short, -1 becomes MAXUSHORT, which is > numvifs.)
1255     */
1256    if (xmt_vif < numvifs) {
1257	MC_SEND(ip, viftable + xmt_vif, m);
1258	return 1;
1259    }
1260
1261    /*
1262     * Don't forward if it didn't arrive from the parent vif for its origin.
1263     */
1264    vifi = rt->mfc_parent;
1265    if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) {
1266	/* came in the wrong interface */
1267	if (mrtdebug & DEBUG_FORWARD)
1268	    log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n", (void *)ifp,
1269		vifi, vifi < numvifs ? (void *)viftable[vifi].v_ifp : NULL);
1270	++mrtstat.mrts_wrong_if;
1271	++rt->mfc_wrong_if;
1272	/*
1273	 * If we are doing PIM assert processing, and we are forwarding
1274	 * packets on this interface, and it is a broadcast medium
1275	 * interface (and not a tunnel), send a message to the routing daemon.
1276	 */
1277	if (pim_assert && rt->mfc_ttls[vifi] &&
1278		(ifp->if_flags & IFF_BROADCAST) &&
1279		!(viftable[vifi].v_flags & VIFF_TUNNEL)) {
1280	    struct timeval now;
1281	    u_long delta;
1282
1283	    GET_TIME(now);
1284
1285	    TV_DELTA(rt->mfc_last_assert, now, delta);
1286
1287	    if (delta > ASSERT_MSG_TIME) {
1288		struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
1289		struct igmpmsg *im;
1290		int hlen = ip->ip_hl << 2;
1291		struct mbuf *mm = m_copy(m, 0, hlen);
1292
1293		if (mm && (M_HASCL(mm) || mm->m_len < hlen))
1294		    mm = m_pullup(mm, hlen);
1295		if (mm == NULL)
1296		    return ENOBUFS;
1297
1298		rt->mfc_last_assert = now;
1299
1300		im = mtod(mm, struct igmpmsg *);
1301		im->im_msgtype	= IGMPMSG_WRONGVIF;
1302		im->im_mbz		= 0;
1303		im->im_vif		= vifi;
1304
1305		k_igmpsrc.sin_addr = im->im_src;
1306
1307		if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
1308		    log(LOG_WARNING,
1309			"ip_mforward: ip_mrouter socket queue full\n");
1310		    ++mrtstat.mrts_upq_sockfull;
1311		    return ENOBUFS;
1312		}
1313	    }
1314	}
1315	return 0;
1316    }
1317
1318    /* If I sourced this packet, it counts as output, else it was input. */
1319    if (ip->ip_src.s_addr == viftable[vifi].v_lcl_addr.s_addr) {
1320	viftable[vifi].v_pkt_out++;
1321	viftable[vifi].v_bytes_out += plen;
1322    } else {
1323	viftable[vifi].v_pkt_in++;
1324	viftable[vifi].v_bytes_in += plen;
1325    }
1326    rt->mfc_pkt_cnt++;
1327    rt->mfc_byte_cnt += plen;
1328
1329    /*
1330     * For each vif, decide if a copy of the packet should be forwarded.
1331     * Forward if:
1332     *		- the ttl exceeds the vif's threshold
1333     *		- there are group members downstream on the interface
1334     */
1335    for (vifi = 0; vifi < numvifs; vifi++)
1336	if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
1337	    viftable[vifi].v_pkt_out++;
1338	    viftable[vifi].v_bytes_out += plen;
1339	    MC_SEND(ip, viftable+vifi, m);
1340	}
1341
1342    return 0;
1343}
1344
1345/*
1346 * check if a vif number is legal/ok. This is used by ip_output.
1347 */
1348static int
1349X_legal_vif_num(int vif)
1350{
1351    return (vif >= 0 && vif < numvifs);
1352}
1353
1354/*
1355 * Return the local address used by this vif
1356 */
1357static u_long
1358X_ip_mcast_src(int vifi)
1359{
1360    if (vifi >= 0 && vifi < numvifs)
1361	return viftable[vifi].v_lcl_addr.s_addr;
1362    else
1363	return INADDR_ANY;
1364}
1365
1366static void
1367phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
1368{
1369    struct mbuf *mb_copy;
1370    int hlen = ip->ip_hl << 2;
1371
1372    /*
1373     * Make a new reference to the packet; make sure that
1374     * the IP header is actually copied, not just referenced,
1375     * so that ip_output() only scribbles on the copy.
1376     */
1377    mb_copy = m_copy(m, 0, M_COPYALL);
1378    if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen))
1379	mb_copy = m_pullup(mb_copy, hlen);
1380    if (mb_copy == NULL)
1381	return;
1382
1383    if (vifp->v_rate_limit == 0)
1384	tbf_send_packet(vifp, mb_copy);
1385    else
1386	tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *), ip->ip_len);
1387}
1388
1389static void
1390encap_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
1391{
1392    struct mbuf *mb_copy;
1393    struct ip *ip_copy;
1394    int i, len = ip->ip_len;
1395
1396    /*
1397     * XXX: take care of delayed checksums.
1398     * XXX: if network interfaces are capable of computing checksum for
1399     * encapsulated multicast data packets, we need to reconsider this.
1400     */
1401    if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
1402	in_delayed_cksum(m);
1403	m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
1404    }
1405
1406    /*
1407     * copy the old packet & pullup its IP header into the
1408     * new mbuf so we can modify it.  Try to fill the new mbuf as much
1409     * as possible now, since otherwise the ethernet driver will have to.
1410     */
1411    MGETHDR(mb_copy, M_DONTWAIT, MT_HEADER);
1412    if (mb_copy == NULL)
1413	return;
1414#ifdef MAC
1415    mac_create_mbuf_multicast_encap(m, vifp->v_ifp, mb_copy);
1416#endif
1417    mb_copy->m_data += max_linkhdr;
1418    mb_copy->m_len = sizeof(multicast_encap_iphdr);
1419
1420    if ((mb_copy->m_next = m_copy(m, 0, M_COPYALL)) == NULL) {
1421	m_freem(mb_copy);
1422	return;
1423    }
1424    i = MHLEN - M_LEADINGSPACE(mb_copy);
1425    if (i > len)
1426	i = len;
1427    mb_copy = m_pullup(mb_copy, i);
1428    if (mb_copy == NULL)
1429	return;
1430    mb_copy->m_pkthdr.len = len + sizeof(multicast_encap_iphdr);
1431
1432    /*
1433     * fill in the encapsulating IP header.
1434     */
1435    ip_copy = mtod(mb_copy, struct ip *);
1436    *ip_copy = multicast_encap_iphdr;
1437#ifdef RANDOM_IP_ID
1438    ip_copy->ip_id = ip_randomid();
1439#else
1440    ip_copy->ip_id = htons(ip_id++);
1441#endif
1442    ip_copy->ip_len += len;
1443    ip_copy->ip_src = vifp->v_lcl_addr;
1444    ip_copy->ip_dst = vifp->v_rmt_addr;
1445
1446    /*
1447     * turn the encapsulated IP header back into a valid one.
1448     */
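    /*
     * (The inner ttl is decremented, the inner length and offset fields
     * go back into network byte order, and m_data is temporarily advanced
     * past the outer header so that in_cksum() covers the inner header
     * only.)
     */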
1449    ip = (struct ip *)((caddr_t)ip_copy + sizeof(multicast_encap_iphdr));
1450    --ip->ip_ttl;
1451    ip->ip_len = htons(ip->ip_len);
1452    ip->ip_off = htons(ip->ip_off);
1453    ip->ip_sum = 0;
1454    mb_copy->m_data += sizeof(multicast_encap_iphdr);
1455    ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
1456    mb_copy->m_data -= sizeof(multicast_encap_iphdr);
1457
1458    if (vifp->v_rate_limit == 0)
1459	tbf_send_packet(vifp, mb_copy);
1460    else
1461	tbf_control(vifp, mb_copy, ip, ip_copy->ip_len);
1462}
1463
1464/*
1465 * Token bucket filter module
1466 */
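/*
 * Overview: each vif's struct tbf holds a bucket of byte "tokens" that
 * tbf_update_tokens() refills at the vif's configured rate, capped at
 * MAX_BKT_SIZE.  A packet is sent at once if enough tokens are available;
 * otherwise it is queued (up to tbf_max_q_len packets) and drained later
 * by tbf_process_q()/tbf_reprocess_q().  When the queue is full,
 * tbf_dq_sel() may evict a lower-priority packet to make room.
 */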
1467
1468static void
1469tbf_control(struct vif *vifp, struct mbuf *m, struct ip *ip, u_long p_len)
1470{
1471    struct tbf *t = vifp->v_tbf;
1472
1473    if (p_len > MAX_BKT_SIZE) {		/* drop if packet is too large */
1474	mrtstat.mrts_pkt2large++;
1475	m_freem(m);
1476	return;
1477    }
1478
1479    tbf_update_tokens(vifp);
1480
1481    if (t->tbf_q_len == 0) {		/* queue empty...		*/
1482	if (p_len <= t->tbf_n_tok) {	/* send packet if enough tokens */
1483	    t->tbf_n_tok -= p_len;
1484	    tbf_send_packet(vifp, m);
1485	} else {			/* no, queue packet and try later */
1486	    tbf_queue(vifp, m);
1487	    timeout(tbf_reprocess_q, (caddr_t)vifp, TBF_REPROCESS);
1488	}
1489    } else if (t->tbf_q_len < t->tbf_max_q_len) {
1490	/* finite queue length, so queue pkts and process queue */
1491	tbf_queue(vifp, m);
1492	tbf_process_q(vifp);
1493    } else {
1494	/* queue full, try to dq and queue and process */
1495	if (!tbf_dq_sel(vifp, ip)) {
1496	    mrtstat.mrts_q_overflow++;
1497	    m_freem(m);
1498	} else {
1499	    tbf_queue(vifp, m);
1500	    tbf_process_q(vifp);
1501	}
1502    }
1503}
1504
1505/*
1506 * adds a packet to the queue at the interface
1507 */
1508static void
1509tbf_queue(struct vif *vifp, struct mbuf *m)
1510{
1511    int s = splnet();
1512    struct tbf *t = vifp->v_tbf;
1513
1514    if (t->tbf_t == NULL)	/* Queue was empty */
1515	t->tbf_q = m;
1516    else			/* Insert at tail */
1517	t->tbf_t->m_act = m;
1518
1519    t->tbf_t = m;		/* Set new tail pointer */
1520
1521#ifdef DIAGNOSTIC
1522    /* Make sure we didn't get fed a bogus mbuf */
1523    if (m->m_act)
1524	panic("tbf_queue: m_act");
1525#endif
1526    m->m_act = NULL;
1527
1528    t->tbf_q_len++;
1529
1530    splx(s);
1531}
1532
1533/*
1534 * processes the queue at the interface
1535 */
1536static void
1537tbf_process_q(struct vif *vifp)
1538{
1539    int s = splnet();
1540    struct tbf *t = vifp->v_tbf;
1541
1542    /* loop through the queue at the interface and send as many packets
1543     * as possible
1544     */
1545    while (t->tbf_q_len > 0) {
1546	struct mbuf *m = t->tbf_q;
1547	int len = mtod(m, struct ip *)->ip_len;
1548
1549	/* determine if the packet can be sent */
1550	if (len > t->tbf_n_tok)	/* not enough tokens, we are done */
1551	    break;
1552	/* ok, reduce no of tokens, dequeue and send the packet. */
1553	t->tbf_n_tok -= len;
1554
1555	t->tbf_q = m->m_act;
1556	if (--t->tbf_q_len == 0)
1557	    t->tbf_t = NULL;
1558
1559	m->m_act = NULL;
1560	tbf_send_packet(vifp, m);
1561    }
1562    splx(s);
1563}
1564
1565static void
1566tbf_reprocess_q(void *xvifp)
1567{
1568    struct vif *vifp = xvifp;
1569
1570    if (ip_mrouter == NULL)
1571	return;
1572    tbf_update_tokens(vifp);
1573    tbf_process_q(vifp);
1574    if (vifp->v_tbf->tbf_q_len)
1575	timeout(tbf_reprocess_q, (caddr_t)vifp, TBF_REPROCESS);
1576}
1577
1578/* function that will selectively discard a member of the queue whose
1579 * priority, as computed by priority(), is lower than the new packet's
1580 */
1581static int
1582tbf_dq_sel(struct vif *vifp, struct ip *ip)
1583{
1584    int s = splnet();
1585    u_int p;
1586    struct mbuf *m, *last;
1587    struct mbuf **np;
1588    struct tbf *t = vifp->v_tbf;
1589
1590    p = priority(vifp, ip);
1591
1592    np = &t->tbf_q;
1593    last = NULL;
1594    while ((m = *np) != NULL) {
1595	if (p > priority(vifp, mtod(m, struct ip *))) {
1596	    *np = m->m_act;
1597	    /* If we're removing the last packet, fix the tail pointer */
1598	    if (m == t->tbf_t)
1599		t->tbf_t = last;
1600	    m_freem(m);
1601	    /* It's impossible for the queue to be empty, but check anyway. */
1602	    if (--t->tbf_q_len == 0)
1603		t->tbf_t = NULL;
1604	    splx(s);
1605	    mrtstat.mrts_drop_sel++;
1606	    return 1;
1607	}
1608	np = &m->m_act;
1609	last = m;
1610    }
1611    splx(s);
1612    return 0;
1613}
1614
1615static void
1616tbf_send_packet(struct vif *vifp, struct mbuf *m)
1617{
1618    int s = splnet();
1619
1620    if (vifp->v_flags & VIFF_TUNNEL)	/* If tunnel options */
1621	ip_output(m, NULL, &vifp->v_route, IP_FORWARDING, NULL, NULL);
1622    else {
1623	struct ip_moptions imo;
1624	int error;
1625	static struct route ro; /* XXX check this */
1626
1627	imo.imo_multicast_ifp  = vifp->v_ifp;
1628	imo.imo_multicast_ttl  = mtod(m, struct ip *)->ip_ttl - 1;
1629	imo.imo_multicast_loop = 1;
1630	imo.imo_multicast_vif  = -1;
1631
1632	/*
1633	 * Re-entrancy should not be a problem here, because
1634	 * the packets that we send out and are looped back at us
1635	 * should get rejected because they appear to come from
1636	 * the loopback interface, thus preventing looping.
1637	 */
1638	error = ip_output(m, NULL, &ro, IP_FORWARDING, &imo, NULL);
1639
1640	if (mrtdebug & DEBUG_XMIT)
1641	    log(LOG_DEBUG, "phyint_send on vif %d err %d\n",
1642		(int)(vifp - viftable), error);
1643    }
1644    splx(s);
1645}
1646
1647/* determine the current time, compute the elapsed time since the last
1648 * packet (in microseconds), and update the number of tokens in the
1649 * bucket accordingly
1650 */
1651static void
1652tbf_update_tokens(struct vif *vifp)
1653{
1654    struct timeval tp;
1655    u_long tm;
1656    int s = splnet();
1657    struct tbf *t = vifp->v_tbf;
1658
1659    GET_TIME(tp);
1660
1661    TV_DELTA(tp, t->tbf_last_pkt_t, tm);
1662
1663    /*
1664     * This formula is actually
1665     * "time in seconds" * "bytes/second".
1666     *
1667     * (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8)
1668     *
1669     * The (1000/1024) was introduced in add_vif to optimize
1670     * this divide into a shift.
1671     */
1672    t->tbf_n_tok += tm * vifp->v_rate_limit / 1024 / 8;
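    /*
     * Worked example: for a vif configured at 500 kbit/s, add_vif()
     * stores v_rate_limit = 500 * 1024 / 1000 = 512, so after
     * tm = 10000 usec the bucket gains 10000 * 512 / 1024 / 8 = 625
     * bytes, which matches 0.01 s * 500000 bit/s / 8 = 625 bytes.
     */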
1673    t->tbf_last_pkt_t = tp;
1674
1675    if (t->tbf_n_tok > MAX_BKT_SIZE)
1676	t->tbf_n_tok = MAX_BKT_SIZE;
1677
1678    splx(s);
1679}
1680
1681static int
1682priority(struct vif *vifp, struct ip *ip)
1683{
1684    int prio = 50; /* the lowest priority -- default case */
1685
1686    /* temporary hack; may add general packet classifier some day */
1687
1688    /*
1689     * The UDP port space is divided up into four priority ranges:
1690     * [0, 16384)     : unclassified - lowest priority
1691     * [16384, 32768) : audio - highest priority
1692     * [32768, 49152) : whiteboard - medium priority
1693     * [49152, 65536) : video - low priority
1694     *
1695     * Everything else gets lowest priority.
1696     */
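    /*
     * The switch below keys on the top two bits of the destination port
     * (uh_dport & 0xc000): 0x4000 covers [16384, 32768) -> prio 70,
     * 0x8000 covers [32768, 49152) -> prio 60, 0xc000 covers
     * [49152, 65536) -> prio 55, and anything else keeps the default 50.
     */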
1697    if (ip->ip_p == IPPROTO_UDP) {
1698	struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2));
1699	switch (ntohs(udp->uh_dport) & 0xc000) {
1700	case 0x4000:
1701	    prio = 70;
1702	    break;
1703	case 0x8000:
1704	    prio = 60;
1705	    break;
1706	case 0xc000:
1707	    prio = 55;
1708	    break;
1709	}
1710    }
1711    return prio;
1712}
1713
1714/*
1715 * End of token bucket filter modifications
1716 */
1717
1718static int
1719X_ip_rsvp_vif(struct socket *so, struct sockopt *sopt)
1720{
1721    int error, vifi, s;
1722
1723    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP)
1724	return EOPNOTSUPP;
1725
1726    error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
1727    if (error)
1728	return error;
1729
1730    s = splnet();
1731
1732    if (vifi < 0 || vifi >= numvifs) {	/* Error if vif is invalid */
1733	splx(s);
1734	return EADDRNOTAVAIL;
1735    }
1736
1737    if (sopt->sopt_name == IP_RSVP_VIF_ON) {
1738	/* Check if socket is available. */
1739	if (viftable[vifi].v_rsvpd != NULL) {
1740	    splx(s);
1741	    return EADDRINUSE;
1742	}
1743
1744	viftable[vifi].v_rsvpd = so;
1745	/* This may seem silly, but we need to be sure we don't over-increment
1746	 * the RSVP counter, in case something slips up.
1747	 */
1748	if (!viftable[vifi].v_rsvp_on) {
1749	    viftable[vifi].v_rsvp_on = 1;
1750	    rsvp_on++;
1751	}
1752    } else { /* must be VIF_OFF */
1753	/*
1754	 * XXX as an additional consistency check, one could make sure
1755	 * that viftable[vifi].v_rsvpd == so, otherwise passing so as
1756	 * first parameter is pretty useless.
1757	 */
1758	viftable[vifi].v_rsvpd = NULL;
1759	/*
1760	 * This may seem silly, but we need to be sure we don't over-decrement
1761	 * the RSVP counter, in case something slips up.
1762	 */
1763	if (viftable[vifi].v_rsvp_on) {
1764	    viftable[vifi].v_rsvp_on = 0;
1765	    rsvp_on--;
1766	}
1767    }
1768    splx(s);
1769    return 0;
1770}
1771
1772static void
1773X_ip_rsvp_force_done(struct socket *so)
1774{
1775    int vifi;
1776    int s;
1777
1778    /* Don't bother if it is not the right type of socket. */
1779    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP)
1780	return;
1781
1782    s = splnet();
1783
1784    /* The socket may be attached to more than one vif...this
1785     * is perfectly legal.
1786     */
1787    for (vifi = 0; vifi < numvifs; vifi++) {
1788	if (viftable[vifi].v_rsvpd == so) {
1789	    viftable[vifi].v_rsvpd = NULL;
1790	    /* This may seem silly, but we need to be sure we don't
1791	     * over-decrement the RSVP counter, in case something slips up.
1792	     */
1793	    if (viftable[vifi].v_rsvp_on) {
1794		viftable[vifi].v_rsvp_on = 0;
1795		rsvp_on--;
1796	    }
1797	}
1798    }
1799
1800    splx(s);
1801}
1802
1803static void
1804X_rsvp_input(struct mbuf *m, int off)
1805{
1806    int vifi;
1807    struct ip *ip = mtod(m, struct ip *);
1808    struct sockaddr_in rsvp_src = { sizeof rsvp_src, AF_INET };
1809    int s;
1810    struct ifnet *ifp;
1811
1812    if (rsvpdebug)
1813	printf("rsvp_input: rsvp_on %d\n",rsvp_on);
1814
1815    /* Can still get packets with rsvp_on = 0 if there is a local member
1816     * of the group to which the RSVP packet is addressed.  But in this
1817     * case we want to throw the packet away.
1818     */
1819    if (!rsvp_on) {
1820	m_freem(m);
1821	return;
1822    }
1823
1824    s = splnet();
1825
1826    if (rsvpdebug)
1827	printf("rsvp_input: check vifs\n");
1828
1829#ifdef DIAGNOSTIC
1830    if (!(m->m_flags & M_PKTHDR))
1831	panic("rsvp_input no hdr");
1832#endif
1833
1834    ifp = m->m_pkthdr.rcvif;
1835    /* Find which vif the packet arrived on. */
1836    for (vifi = 0; vifi < numvifs; vifi++)
1837	if (viftable[vifi].v_ifp == ifp)
1838	    break;
1839
1840    if (vifi == numvifs || viftable[vifi].v_rsvpd == NULL) {
1841	/*
1842	 * If the old-style non-vif-associated socket is set,
1843	 * then use it.  Otherwise, drop packet since there
1844	 * is no specific socket for this vif.
1845	 */
1846	if (ip_rsvpd != NULL) {
1847	    if (rsvpdebug)
1848		printf("rsvp_input: Sending packet up old-style socket\n");
1849	    rip_input(m, off);  /* xxx */
1850	} else {
1851	    if (rsvpdebug && vifi == numvifs)
1852		printf("rsvp_input: Can't find vif for packet.\n");
1853	    else if (rsvpdebug && viftable[vifi].v_rsvpd == NULL)
1854		printf("rsvp_input: No socket defined for vif %d\n",vifi);
1855	    m_freem(m);
1856	}
1857	splx(s);
1858	return;
1859    }
1860    rsvp_src.sin_addr = ip->ip_src;
1861
1862    if (rsvpdebug && m)
1863	printf("rsvp_input: m->m_len = %d, sbspace() = %ld\n",
1864	       m->m_len,sbspace(&(viftable[vifi].v_rsvpd->so_rcv)));
1865
1866    if (socket_send(viftable[vifi].v_rsvpd, m, &rsvp_src) < 0) {
1867	if (rsvpdebug)
1868	    printf("rsvp_input: Failed to append to socket\n");
1869    } else {
1870	if (rsvpdebug)
1871	    printf("rsvp_input: send packet up\n");
1872    }
1873
1874    splx(s);
1875}
1876
1877static int
1878ip_mroute_modevent(module_t mod, int type, void *unused)
1879{
1880    int s;
1881
1882    switch (type) {
1883    case MOD_LOAD:
1884	s = splnet();
1885	/* XXX Protect against multiple loading */
1886	ip_mcast_src = X_ip_mcast_src;
1887	ip_mforward = X_ip_mforward;
1888	ip_mrouter_done = X_ip_mrouter_done;
1889	ip_mrouter_get = X_ip_mrouter_get;
1890	ip_mrouter_set = X_ip_mrouter_set;
1891	ip_rsvp_force_done = X_ip_rsvp_force_done;
1892	ip_rsvp_vif = X_ip_rsvp_vif;
1893	legal_vif_num = X_legal_vif_num;
1894	mrt_ioctl = X_mrt_ioctl;
1895	rsvp_input_p = X_rsvp_input;
1896	splx(s);
1897	break;
1898
1899    case MOD_UNLOAD:
1900	if (ip_mrouter)
1901	    return EINVAL;
1902
1903	s = splnet();
1904	ip_mcast_src = NULL;
1905	ip_mforward = NULL;
1906	ip_mrouter_done = NULL;
1907	ip_mrouter_get = NULL;
1908	ip_mrouter_set = NULL;
1909	ip_rsvp_force_done = NULL;
1910	ip_rsvp_vif = NULL;
1911	legal_vif_num = NULL;
1912	mrt_ioctl = NULL;
1913	rsvp_input_p = NULL;
1914	splx(s);
1915	break;
1916    }
1917    return 0;
1918}
1919
1920static moduledata_t ip_mroutemod = {
1921    "ip_mroute",
1922    ip_mroute_modevent,
1923    0
1924};
1925DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PSEUDO, SI_ORDER_ANY);
1926