1/*-
2 * Copyright (c) 1989 Stephen Deering
3 * Copyright (c) 1992, 1993
4 *      The Regents of the University of California.  All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Stephen Deering of Stanford University.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *      @(#)ip_mroute.c 8.2 (Berkeley) 11/15/93
34 */
35
36/*
37 * IP multicast forwarding procedures
38 *
39 * Written by David Waitzman, BBN Labs, August 1988.
40 * Modified by Steve Deering, Stanford, February 1989.
41 * Modified by Mark J. Steiglitz, Stanford, May, 1991
42 * Modified by Van Jacobson, LBL, January 1993
43 * Modified by Ajit Thyagarajan, PARC, August 1993
44 * Modified by Bill Fenner, PARC, April 1995
45 * Modified by Ahmed Helmy, SGI, June 1996
46 * Modified by George Edmond Eddy (Rusty), ISI, February 1998
47 * Modified by Pavlin Radoslavov, USC/ISI, May 1998, August 1999, October 2000
48 * Modified by Hitoshi Asaeda, WIDE, August 2000
49 * Modified by Pavlin Radoslavov, ICSI, October 2002
50 *
51 * MROUTING Revision: 3.5
52 * and PIM-SMv2 and PIM-DM support, advanced API support,
53 * bandwidth metering and signaling
54 *
55 * $FreeBSD: head/sys/netinet/ip_mroute.c 166549 2007-02-07 16:04:13Z bms $
56 */
57
58#include "opt_mac.h"
59#include "opt_mrouting.h"
60
61#ifdef PIM
62#define _PIM_VT 1
63#endif
64
65#include <sys/param.h>
66#include <sys/kernel.h>
67#include <sys/lock.h>
68#include <sys/malloc.h>
69#include <sys/mbuf.h>
70#include <sys/module.h>
71#include <sys/priv.h>
72#include <sys/protosw.h>
73#include <sys/signalvar.h>
74#include <sys/socket.h>
75#include <sys/socketvar.h>
76#include <sys/sockio.h>
77#include <sys/sx.h>
78#include <sys/sysctl.h>
79#include <sys/syslog.h>
80#include <sys/systm.h>
81#include <sys/time.h>
82#include <net/if.h>
83#include <net/netisr.h>
84#include <net/route.h>
85#include <netinet/in.h>
86#include <netinet/igmp.h>
87#include <netinet/in_systm.h>
88#include <netinet/in_var.h>
89#include <netinet/ip.h>
90#include <netinet/ip_encap.h>
91#include <netinet/ip_mroute.h>
92#include <netinet/ip_var.h>
93#include <netinet/ip_options.h>
94#ifdef PIM
95#include <netinet/pim.h>
96#include <netinet/pim_var.h>
97#endif
98#include <netinet/udp.h>
99#include <machine/in_cksum.h>
100
101#include <security/mac/mac_framework.h>
102
103/*
104 * Debugging controls for the RSVP and multicast routing code.
105 * These flags can only be set with the debugger.
106 */
107static u_int    rsvpdebug;		/* non-zero enables debugging	*/
108
109static u_int	mrtdebug;		/* any set of the flags below	*/
110#define		DEBUG_MFC	0x02
111#define		DEBUG_FORWARD	0x04
112#define		DEBUG_EXPIRE	0x08
113#define		DEBUG_XMIT	0x10
114#define		DEBUG_PIM	0x20
115
116#define		VIFI_INVALID	((vifi_t) -1)
117
118#define M_HASCL(m)	((m)->m_flags & M_EXT)
119
120static MALLOC_DEFINE(M_MRTABLE, "mroutetbl", "multicast routing tables");
121
122/*
123 * Locking.  We use two locks: one for the virtual interface table and
124 * one for the forwarding table.  These locks may be nested in which case
125 * the VIF lock must always be taken first.  Note that each lock is used
126 * to cover not only the specific data structure but also related data
127 * structures.  It may be better to add more fine-grained locking later;
128 * it's not clear how performance-critical this code is.
129 *
130 * XXX: This module could particularly benefit from being cleaned
131 *      up to use the <sys/queue.h> macros.
132 *
133 */
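/*
 * Editor's sketch (not part of the original source): with the two-lock
 * scheme described above, code that needs both tables nests the locks in
 * VIF-then-MFC order:
 *
 *	VIF_LOCK();
 *	MFC_LOCK();
 *	... update both tables ...
 *	MFC_UNLOCK();
 *	VIF_UNLOCK();
 *
 * as add_mfc() and X_ip_mforward() below do.
 */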
134
135static struct mrtstat	mrtstat;
136SYSCTL_STRUCT(_net_inet_ip, OID_AUTO, mrtstat, CTLFLAG_RW,
137    &mrtstat, mrtstat,
138    "Multicast Routing Statistics (struct mrtstat, netinet/ip_mroute.h)");
139
140static struct mfc	*mfctable[MFCTBLSIZ];
141SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, mfctable, CTLFLAG_RD,
142    &mfctable, sizeof(mfctable), "S,*mfc[MFCTBLSIZ]",
143    "Multicast Forwarding Table (struct *mfc[MFCTBLSIZ], netinet/ip_mroute.h)");
144
145static struct mtx mfc_mtx;
146#define	MFC_LOCK()	mtx_lock(&mfc_mtx)
147#define	MFC_UNLOCK()	mtx_unlock(&mfc_mtx)
148#define	MFC_LOCK_ASSERT()	do {					\
149	mtx_assert(&mfc_mtx, MA_OWNED);					\
150	NET_ASSERT_GIANT();						\
151} while (0)
152#define	MFC_LOCK_INIT()	mtx_init(&mfc_mtx, "mroute mfc table", NULL, MTX_DEF)
153#define	MFC_LOCK_DESTROY()	mtx_destroy(&mfc_mtx)
154
155static struct vif	viftable[MAXVIFS];
156SYSCTL_OPAQUE(_net_inet_ip, OID_AUTO, viftable, CTLFLAG_RD,
157    &viftable, sizeof(viftable), "S,vif[MAXVIFS]",
158    "Multicast Virtual Interfaces (struct vif[MAXVIFS], netinet/ip_mroute.h)");
159
160static struct mtx vif_mtx;
161#define	VIF_LOCK()	mtx_lock(&vif_mtx)
162#define	VIF_UNLOCK()	mtx_unlock(&vif_mtx)
163#define	VIF_LOCK_ASSERT()	mtx_assert(&vif_mtx, MA_OWNED)
164#define	VIF_LOCK_INIT()	mtx_init(&vif_mtx, "mroute vif table", NULL, MTX_DEF)
165#define	VIF_LOCK_DESTROY()	mtx_destroy(&vif_mtx)
166
167static u_char		nexpire[MFCTBLSIZ];
168
169static eventhandler_tag if_detach_event_tag = NULL;
170
171static struct callout expire_upcalls_ch;
172
173#define		EXPIRE_TIMEOUT	(hz / 4)	/* 4x / second		*/
174#define		UPCALL_EXPIRE	6		/* number of timeouts	*/
175
176/*
177 * Define the token bucket filter structures.
178 * tbftable -> each vif has one of these for storing its rate-limiting state
179 */
180
181static struct tbf tbftable[MAXVIFS];
182#define		TBF_REPROCESS	(hz / 100)	/* 100x / second */
183
184#define ENCAP_TTL 64
185
186/*
187 * Bandwidth meter variables and constants
188 */
189static MALLOC_DEFINE(M_BWMETER, "bwmeter", "multicast upcall bw meters");
190/*
191 * Pending timeouts are stored in a hash table, the key being the
192 * expiration time. Periodically, the entries are analysed and processed.
193 */
194#define BW_METER_BUCKETS	1024
195static struct bw_meter *bw_meter_timers[BW_METER_BUCKETS];
196static struct callout bw_meter_ch;
197#define BW_METER_PERIOD (hz)		/* periodic handling of bw meters */
198
199/*
200 * Pending upcalls are stored in a vector which is flushed when
201 * full, or periodically.
202 */
203static struct bw_upcall	bw_upcalls[BW_UPCALLS_MAX];
204static u_int	bw_upcalls_n; /* # of pending upcalls */
205static struct callout bw_upcalls_ch;
206#define BW_UPCALLS_PERIOD (hz)		/* periodic flush of bw upcalls */
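/*
 * Editor's note: a daemon arms a meter with MRT_ADD_BW_UPCALL, passing a
 * struct bw_upcall that names the (source, group) flow and its threshold;
 * when the kernel detects a crossing it queues a bw_upcall here, and the
 * batch is delivered to the daemon either when BW_UPCALLS_MAX entries have
 * accumulated or on the periodic BW_UPCALLS_PERIOD flush.
 */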
207
208#ifdef PIM
209static struct pimstat pimstat;
210SYSCTL_STRUCT(_net_inet_pim, PIMCTL_STATS, stats, CTLFLAG_RD,
211    &pimstat, pimstat,
212    "PIM Statistics (struct pimstat, netinet/pim_var.h)");
213
214/*
215 * Note: the PIM Register encapsulation adds the following in front of a
216 * data packet:
217 *
218 * struct pim_encap_hdr {
219 *    struct ip ip;
220 *    struct pim_encap_pimhdr  pim;
221 * }
222 *
223 */
224
225struct pim_encap_pimhdr {
226	struct pim pim;
227	uint32_t   flags;
228};
229
230static struct ip pim_encap_iphdr = {
231#if BYTE_ORDER == LITTLE_ENDIAN
232	sizeof(struct ip) >> 2,
233	IPVERSION,
234#else
235	IPVERSION,
236	sizeof(struct ip) >> 2,
237#endif
238	0,			/* tos */
239	sizeof(struct ip),	/* total length */
240	0,			/* id */
241	0,			/* frag offset */
242	ENCAP_TTL,
243	IPPROTO_PIM,
244	0,			/* checksum */
245};
246
247static struct pim_encap_pimhdr pim_encap_pimhdr = {
248    {
249	PIM_MAKE_VT(PIM_VERSION, PIM_REGISTER), /* PIM vers and message type */
250	0,			/* reserved */
251	0,			/* checksum */
252    },
253    0				/* flags */
254};
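/*
 * Editor's note: together the two templates above prepend
 * sizeof(struct ip) + sizeof(struct pim_encap_pimhdr) = 20 + 8 = 28 bytes
 * to each registered data packet; the pim_register_send*() routines
 * declared further down are expected to fill in the per-packet fields
 * (lengths, checksum, RP destination) when a packet is actually sent.
 */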
255
256static struct ifnet multicast_register_if;
257static vifi_t reg_vif_num = VIFI_INVALID;
258#endif /* PIM */
259
260/*
261 * Private variables.
262 */
263static vifi_t	   numvifs;
264
265/*
266 * Callout for queue processing.
267 */
268static struct callout tbf_reprocess_ch;
269
270static u_long	X_ip_mcast_src(int vifi);
271static int	X_ip_mforward(struct ip *ip, struct ifnet *ifp,
272			struct mbuf *m, struct ip_moptions *imo);
273static int	X_ip_mrouter_done(void);
274static int	X_ip_mrouter_get(struct socket *so, struct sockopt *m);
275static int	X_ip_mrouter_set(struct socket *so, struct sockopt *m);
276static int	X_legal_vif_num(int vif);
277static int	X_mrt_ioctl(int cmd, caddr_t data);
278
279static int get_sg_cnt(struct sioc_sg_req *);
280static int get_vif_cnt(struct sioc_vif_req *);
281static void if_detached_event(void *arg __unused, struct ifnet *);
282static int ip_mrouter_init(struct socket *, int);
283static int add_vif(struct vifctl *);
284static int del_vif_locked(vifi_t);
285static int del_vif(vifi_t);
286static int add_mfc(struct mfcctl2 *);
287static int del_mfc(struct mfcctl2 *);
288static int set_api_config(uint32_t *); /* choose API capabilities */
289static int socket_send(struct socket *, struct mbuf *, struct sockaddr_in *);
290static int set_assert(int);
291static void expire_upcalls(void *);
292static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, vifi_t);
293static void phyint_send(struct ip *, struct vif *, struct mbuf *);
294static void tbf_control(struct vif *, struct mbuf *, struct ip *, u_long);
295static void tbf_queue(struct vif *, struct mbuf *);
296static void tbf_process_q(struct vif *);
297static void tbf_reprocess_q(void *);
298static int tbf_dq_sel(struct vif *, struct ip *);
299static void tbf_send_packet(struct vif *, struct mbuf *);
300static void tbf_update_tokens(struct vif *);
301static int priority(struct vif *, struct ip *);
302
303/*
304 * Bandwidth monitoring
305 */
306static void free_bw_list(struct bw_meter *list);
307static int add_bw_upcall(struct bw_upcall *);
308static int del_bw_upcall(struct bw_upcall *);
309static void bw_meter_receive_packet(struct bw_meter *x, int plen,
310		struct timeval *nowp);
311static void bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp);
312static void bw_upcalls_send(void);
313static void schedule_bw_meter(struct bw_meter *x, struct timeval *nowp);
314static void unschedule_bw_meter(struct bw_meter *x);
315static void bw_meter_process(void);
316static void expire_bw_upcalls_send(void *);
317static void expire_bw_meter_process(void *);
318
319#ifdef PIM
320static int pim_register_send(struct ip *, struct vif *,
321		struct mbuf *, struct mfc *);
322static int pim_register_send_rp(struct ip *, struct vif *,
323		struct mbuf *, struct mfc *);
324static int pim_register_send_upcall(struct ip *, struct vif *,
325		struct mbuf *, struct mfc *);
326static struct mbuf *pim_register_prepare(struct ip *, struct mbuf *);
327#endif
328
329/*
330 * whether or not special PIM assert processing is enabled.
331 */
332static int pim_assert;
333/*
334 * Rate limit for assert notification messages, in usec
335 */
336#define ASSERT_MSG_TIME		3000000
337
338/*
339 * Kernel multicast routing API capabilities and setup.
340 * If more API capabilities are added to the kernel, they should be
341 * recorded in `mrt_api_support'.
342 */
343static const uint32_t mrt_api_support = (MRT_MFC_FLAGS_DISABLE_WRONGVIF |
344					 MRT_MFC_FLAGS_BORDER_VIF |
345					 MRT_MFC_RP |
346					 MRT_MFC_BW_UPCALL);
347static uint32_t mrt_api_config = 0;
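/*
 * Editor's note: a daemon negotiates the advanced API by writing the
 * feature bits it wants with MRT_API_CONFIG immediately after MRT_INIT;
 * set_api_config() below intersects the request with mrt_api_support and
 * the setsockopt handler copies the resulting mrt_api_config back out, so
 * the daemon learns which features it actually got.
 */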
348
349/*
350 * Hash function for a source, group entry
351 */
352#define MFCHASH(a, g) MFCHASHMOD(((a) >> 20) ^ ((a) >> 10) ^ (a) ^ \
353			((g) >> 20) ^ ((g) >> 10) ^ (g))
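/*
 * Editor's note: MFCHASH() folds the 32-bit source and group addresses
 * (network byte order) into a bucket index with shifts and xors, reduced
 * to [0, MFCTBLSIZ) by MFCHASHMOD.  A typical lookup is
 *
 *	rt = mfctable[MFCHASH(ip->ip_src.s_addr, ip->ip_dst.s_addr)];
 *
 * followed by a walk of the mfc_next chain, as mfc_find() does below.
 */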
354
355/*
356 * Find a route for a given origin IP address and multicast group address.
357 * Type-of-service parameter to be added in the future.
358 * Statistics are updated by the caller if needed
359 * (mrtstat.mrts_mfc_lookups and mrtstat.mrts_mfc_misses)
360 */
361static struct mfc *
362mfc_find(in_addr_t o, in_addr_t g)
363{
364    struct mfc *rt;
365
366    MFC_LOCK_ASSERT();
367
368    for (rt = mfctable[MFCHASH(o,g)]; rt; rt = rt->mfc_next)
369	if ((rt->mfc_origin.s_addr == o) &&
370		(rt->mfc_mcastgrp.s_addr == g) && (rt->mfc_stall == NULL))
371	    break;
372    return rt;
373}
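/*
 * Editor's note: entries with a non-NULL mfc_stall are placeholders for
 * pending upcalls (packets queued while the routing daemon is asked to
 * resolve the flow); mfc_find() deliberately skips them, so only fully
 * resolved entries are used for forwarding.
 */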
374
375/*
376 * Macros to compute elapsed time efficiently
377 * Borrowed from Van Jacobson's scheduling code
378 */
379#define TV_DELTA(a, b, delta) {					\
380	int xxs;						\
381	delta = (a).tv_usec - (b).tv_usec;			\
382	if ((xxs = (a).tv_sec - (b).tv_sec)) {			\
383		switch (xxs) {					\
384		case 2:						\
385		      delta += 1000000;				\
386		      /* FALLTHROUGH */				\
387		case 1:						\
388		      delta += 1000000;				\
389		      break;					\
390		default:					\
391		      delta += (1000000 * xxs);			\
392		}						\
393	}							\
394}
395
396#define TV_LT(a, b) (((a).tv_usec < (b).tv_usec && \
397	      (a).tv_sec <= (b).tv_sec) || (a).tv_sec < (b).tv_sec)
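/*
 * Editor's worked example: TV_DELTA(a, b, delta) leaves a - b in
 * microseconds in `delta', special-casing differences of one or two
 * seconds to avoid the multiply.  With a = {5, 100} and b = {3, 900000},
 * delta ends up 1100100 usec (1.1001 seconds).  TV_LT(a, b) is simply
 * "a is earlier than b".
 */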
398
399/*
400 * Handle MRT setsockopt commands to modify the multicast routing tables.
401 */
402static int
403X_ip_mrouter_set(struct socket *so, struct sockopt *sopt)
404{
405    int	error, optval;
406    vifi_t	vifi;
407    struct	vifctl vifc;
408    struct	mfcctl2 mfc;
409    struct	bw_upcall bw_upcall;
410    uint32_t	i;
411
412    if (so != ip_mrouter && sopt->sopt_name != MRT_INIT)
413	return EPERM;
414
415    error = 0;
416    switch (sopt->sopt_name) {
417    case MRT_INIT:
418	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
419	if (error)
420	    break;
421	error = ip_mrouter_init(so, optval);
422	break;
423
424    case MRT_DONE:
425	error = ip_mrouter_done();
426	break;
427
428    case MRT_ADD_VIF:
429	error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc);
430	if (error)
431	    break;
432	error = add_vif(&vifc);
433	break;
434
435    case MRT_DEL_VIF:
436	error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
437	if (error)
438	    break;
439	error = del_vif(vifi);
440	break;
441
442    case MRT_ADD_MFC:
443    case MRT_DEL_MFC:
444	/*
445	 * select data size depending on API version.
446	 */
447	if (sopt->sopt_name == MRT_ADD_MFC &&
448		mrt_api_config & MRT_API_FLAGS_ALL) {
449	    error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl2),
450				sizeof(struct mfcctl2));
451	} else {
452	    error = sooptcopyin(sopt, &mfc, sizeof(struct mfcctl),
453				sizeof(struct mfcctl));
454	    bzero((caddr_t)&mfc + sizeof(struct mfcctl),
455			sizeof(mfc) - sizeof(struct mfcctl));
456	}
457	if (error)
458	    break;
459	if (sopt->sopt_name == MRT_ADD_MFC)
460	    error = add_mfc(&mfc);
461	else
462	    error = del_mfc(&mfc);
463	break;
464
465    case MRT_ASSERT:
466	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
467	if (error)
468	    break;
469	set_assert(optval);
470	break;
471
472    case MRT_API_CONFIG:
473	error = sooptcopyin(sopt, &i, sizeof i, sizeof i);
474	if (!error)
475	    error = set_api_config(&i);
476	if (!error)
477	    error = sooptcopyout(sopt, &i, sizeof i);
478	break;
479
480    case MRT_ADD_BW_UPCALL:
481    case MRT_DEL_BW_UPCALL:
482	error = sooptcopyin(sopt, &bw_upcall, sizeof bw_upcall,
483				sizeof bw_upcall);
484	if (error)
485	    break;
486	if (sopt->sopt_name == MRT_ADD_BW_UPCALL)
487	    error = add_bw_upcall(&bw_upcall);
488	else
489	    error = del_bw_upcall(&bw_upcall);
490	break;
491
492    default:
493	error = EOPNOTSUPP;
494	break;
495    }
496    return error;
497}
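/*
 * Editor's sketch (illustrative only, not from the original source): a
 * user-level routing daemon drives the options handled above through
 * setsockopt() on its raw IGMP socket, roughly as follows (error handling
 * omitted, the address is a placeholder):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	struct vifctl vc;
 *
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	memset(&vc, 0, sizeof(vc));
 *	vc.vifc_vifi = 0;
 *	vc.vifc_threshold = 1;
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	...
 *	setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
 */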
498
499/*
500 * Handle MRT getsockopt commands
501 */
502static int
503X_ip_mrouter_get(struct socket *so, struct sockopt *sopt)
504{
505    int error;
506    static int version = 0x0305; /* !!! why is this here? XXX */
507
508    switch (sopt->sopt_name) {
509    case MRT_VERSION:
510	error = sooptcopyout(sopt, &version, sizeof version);
511	break;
512
513    case MRT_ASSERT:
514	error = sooptcopyout(sopt, &pim_assert, sizeof pim_assert);
515	break;
516
517    case MRT_API_SUPPORT:
518	error = sooptcopyout(sopt, &mrt_api_support, sizeof mrt_api_support);
519	break;
520
521    case MRT_API_CONFIG:
522	error = sooptcopyout(sopt, &mrt_api_config, sizeof mrt_api_config);
523	break;
524
525    default:
526	error = EOPNOTSUPP;
527	break;
528    }
529    return error;
530}
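/*
 * Editor's note: these options are read with getsockopt() at the
 * IPPROTO_IP level on the same raw socket, e.g.
 *
 *	uint32_t v;
 *	socklen_t len = sizeof(v);
 *	getsockopt(s, IPPROTO_IP, MRT_API_SUPPORT, &v, &len);
 *
 * returns the mrt_api_support bitmask advertised by this kernel.
 */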
531
532/*
533 * Handle ioctl commands to obtain information from the cache
534 */
535static int
536X_mrt_ioctl(int cmd, caddr_t data)
537{
538    int error = 0;
539
540    /*
541     * Currently the only function calling this ioctl routine is rtioctl().
542     * Typically, only root can create the raw socket in order to execute
543     * this ioctl method; however, the request might be coming from a prison.
544     */
545    error = priv_check(curthread, PRIV_NETINET_MROUTE);
546    if (error)
547	return (error);
548    switch (cmd) {
549    case (SIOCGETVIFCNT):
550	error = get_vif_cnt((struct sioc_vif_req *)data);
551	break;
552
553    case (SIOCGETSGCNT):
554	error = get_sg_cnt((struct sioc_sg_req *)data);
555	break;
556
557    default:
558	error = EINVAL;
559	break;
560    }
561    return error;
562}
563
564/*
565 * returns the packet, byte, and rpf-failure counts for the source/group pair provided
566 */
567static int
568get_sg_cnt(struct sioc_sg_req *req)
569{
570    struct mfc *rt;
571
572    MFC_LOCK();
573    rt = mfc_find(req->src.s_addr, req->grp.s_addr);
574    if (rt == NULL) {
575	MFC_UNLOCK();
576	req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
577	return EADDRNOTAVAIL;
578    }
579    req->pktcnt = rt->mfc_pkt_cnt;
580    req->bytecnt = rt->mfc_byte_cnt;
581    req->wrong_if = rt->mfc_wrong_if;
582    MFC_UNLOCK();
583    return 0;
584}
585
586/*
587 * returns the input and output packet and byte counts on the vif provided
588 */
589static int
590get_vif_cnt(struct sioc_vif_req *req)
591{
592    vifi_t vifi = req->vifi;
593
594    VIF_LOCK();
595    if (vifi >= numvifs) {
596	VIF_UNLOCK();
597	return EINVAL;
598    }
599
600    req->icount = viftable[vifi].v_pkt_in;
601    req->ocount = viftable[vifi].v_pkt_out;
602    req->ibytes = viftable[vifi].v_bytes_in;
603    req->obytes = viftable[vifi].v_bytes_out;
604    VIF_UNLOCK();
605
606    return 0;
607}
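/*
 * Editor's sketch (user-level side, illustrative only; the addresses are
 * placeholders): these counters are fetched with ioctl() on an AF_INET
 * socket, e.g.
 *
 *	struct sioc_sg_req sg;
 *	memset(&sg, 0, sizeof(sg));
 *	sg.src.s_addr = inet_addr("192.0.2.10");
 *	sg.grp.s_addr = inet_addr("239.1.1.1");
 *	if (ioctl(s, SIOCGETSGCNT, &sg) == 0)
 *		printf("%lu pkts, %lu bytes\n", sg.pktcnt, sg.bytecnt);
 *
 * SIOCGETVIFCNT works the same way with a struct sioc_vif_req.
 */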
608
609static void
610ip_mrouter_reset(void)
611{
612    bzero((caddr_t)mfctable, sizeof(mfctable));
613    bzero((caddr_t)nexpire, sizeof(nexpire));
614
615    pim_assert = 0;
616    mrt_api_config = 0;
617
618    callout_init(&expire_upcalls_ch, NET_CALLOUT_MPSAFE);
619
620    bw_upcalls_n = 0;
621    bzero((caddr_t)bw_meter_timers, sizeof(bw_meter_timers));
622    callout_init(&bw_upcalls_ch, NET_CALLOUT_MPSAFE);
623    callout_init(&bw_meter_ch, NET_CALLOUT_MPSAFE);
624
625    callout_init(&tbf_reprocess_ch, NET_CALLOUT_MPSAFE);
626}
627
628static struct mtx mrouter_mtx;		/* used to synch init/done work */
629
630static void
631if_detached_event(void *arg __unused, struct ifnet *ifp)
632{
633    vifi_t vifi;
634    int i;
635    struct mfc *mfc;
636    struct mfc *nmfc;
637    struct mfc **ppmfc;	/* Pointer to previous node's next-pointer */
638    struct rtdetq *pq;
639    struct rtdetq *npq;
640
641    mtx_lock(&mrouter_mtx);
642    if (ip_mrouter == NULL) {
643	mtx_unlock(&mrouter_mtx);
	return;		/* nothing to do, and avoids a second unlock below */
644    }
645
646    /*
647     * Tear down multicast forwarder state associated with this ifnet.
648     * 1. Walk the vif list, matching vifs against this ifnet.
649     * 2. Walk the multicast forwarding cache (mfc) looking for
650     *    inner matches with this vif's index.
651     * 3. Free any pending mbufs for this mfc.
652     * 4. Free the associated mfc entry and state associated with this vif.
653     *    Be very careful about unlinking from a singly-linked list whose
654     *    "head node" is a pointer in a simple array.
655     * 5. Free vif state. This should disable ALLMULTI on the interface.
656     */
657    VIF_LOCK();
658    MFC_LOCK();
659    for (vifi = 0; vifi < numvifs; vifi++) {
660	if (viftable[vifi].v_ifp != ifp)
661		continue;
662	for (i = 0; i < MFCTBLSIZ; i++) {
663	    ppmfc = &mfctable[i];
664	    for (mfc = mfctable[i]; mfc != NULL; ) {
665		nmfc = mfc->mfc_next;
666		if (mfc->mfc_parent == vifi) {
667		    for (pq = mfc->mfc_stall; pq != NULL; ) {
668			npq = pq->next;
669			m_freem(pq->m);
670			free(pq, M_MRTABLE);
671			pq = npq;
672		    }
673		    free_bw_list(mfc->mfc_bw_meter);
674		    free(mfc, M_MRTABLE);
675		    *ppmfc = nmfc;
676		} else {
677		    ppmfc = &mfc->mfc_next;
678		}
679		mfc = nmfc;
680	    }
681	}
682	del_vif_locked(vifi);
683    }
684    MFC_UNLOCK();
685    VIF_UNLOCK();
686
687    mtx_unlock(&mrouter_mtx);
688}
689
690/*
691 * Enable multicast routing
692 */
693static int
694ip_mrouter_init(struct socket *so, int version)
695{
696    if (mrtdebug)
697	log(LOG_DEBUG, "ip_mrouter_init: so_type = %d, pr_protocol = %d\n",
698	    so->so_type, so->so_proto->pr_protocol);
699
700    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_IGMP)
701	return EOPNOTSUPP;
702
703    if (version != 1)
704	return ENOPROTOOPT;
705
706    mtx_lock(&mrouter_mtx);
707
708    if (ip_mrouter != NULL) {
709	mtx_unlock(&mrouter_mtx);
710	return EADDRINUSE;
711    }
712
713    if_detach_event_tag = EVENTHANDLER_REGISTER(ifnet_departure_event,
714        if_detached_event, NULL, EVENTHANDLER_PRI_ANY);
715    if (if_detach_event_tag == NULL) {
	mtx_unlock(&mrouter_mtx);	/* don't return with the lock held */
716	return (ENOMEM);
    }
717
718    callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls, NULL);
719
720    callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
721	expire_bw_upcalls_send, NULL);
722    callout_reset(&bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process, NULL);
723
724    ip_mrouter = so;
725
726    mtx_unlock(&mrouter_mtx);
727
728    if (mrtdebug)
729	log(LOG_DEBUG, "ip_mrouter_init\n");
730
731    return 0;
732}
733
734/*
735 * Disable multicast routing
736 */
737static int
738X_ip_mrouter_done(void)
739{
740    vifi_t vifi;
741    int i;
742    struct ifnet *ifp;
743    struct ifreq ifr;
744    struct mfc *rt;
745    struct rtdetq *rte;
746
747    mtx_lock(&mrouter_mtx);
748
749    if (ip_mrouter == NULL) {
750	mtx_unlock(&mrouter_mtx);
751	return EINVAL;
752    }
753
754    /*
755     * Detach/disable hooks to the rest of the system.
756     */
757    ip_mrouter = NULL;
758    mrt_api_config = 0;
759
760    callout_stop(&tbf_reprocess_ch);
761
762    VIF_LOCK();
763    /*
764     * For each phyint in use, disable promiscuous reception of all IP
765     * multicasts.
766     */
767    for (vifi = 0; vifi < numvifs; vifi++) {
768	if (viftable[vifi].v_lcl_addr.s_addr != 0 &&
769		!(viftable[vifi].v_flags & (VIFF_TUNNEL | VIFF_REGISTER))) {
770	    struct sockaddr_in *so = (struct sockaddr_in *)&(ifr.ifr_addr);
771
772	    so->sin_len = sizeof(struct sockaddr_in);
773	    so->sin_family = AF_INET;
774	    so->sin_addr.s_addr = INADDR_ANY;
775	    ifp = viftable[vifi].v_ifp;
776	    if_allmulti(ifp, 0);
777	}
778    }
779    bzero((caddr_t)tbftable, sizeof(tbftable));
780    bzero((caddr_t)viftable, sizeof(viftable));
781    numvifs = 0;
782    pim_assert = 0;
783    VIF_UNLOCK();
784    EVENTHANDLER_DEREGISTER(ifnet_departure_event, if_detach_event_tag);
785
786    /*
787     * Free all multicast forwarding cache entries.
788     */
789    callout_stop(&expire_upcalls_ch);
790    callout_stop(&bw_upcalls_ch);
791    callout_stop(&bw_meter_ch);
792
793    MFC_LOCK();
794    for (i = 0; i < MFCTBLSIZ; i++) {
795	for (rt = mfctable[i]; rt != NULL; ) {
796	    struct mfc *nr = rt->mfc_next;
797
798	    for (rte = rt->mfc_stall; rte != NULL; ) {
799		struct rtdetq *n = rte->next;
800
801		m_freem(rte->m);
802		free(rte, M_MRTABLE);
803		rte = n;
804	    }
805	    free_bw_list(rt->mfc_bw_meter);
806	    free(rt, M_MRTABLE);
807	    rt = nr;
808	}
809    }
810    bzero((caddr_t)mfctable, sizeof(mfctable));
811    bzero((caddr_t)nexpire, sizeof(nexpire));
812    bw_upcalls_n = 0;
813    bzero(bw_meter_timers, sizeof(bw_meter_timers));
814    MFC_UNLOCK();
815
816#ifdef PIM
817    reg_vif_num = VIFI_INVALID;
818#endif
819
820    mtx_unlock(&mrouter_mtx);
821
822    if (mrtdebug)
823	log(LOG_DEBUG, "ip_mrouter_done\n");
824
825    return 0;
826}
827
828/*
829 * Set PIM assert processing global
830 */
831static int
832set_assert(int i)
833{
834    if ((i != 1) && (i != 0))
835	return EINVAL;
836
837    pim_assert = i;
838
839    return 0;
840}
841
842/*
843 * Configure API capabilities
844 */
845static int
846set_api_config(uint32_t *apival)
847{
848    int i;
849
850    /*
851     * We can set the API capabilities only if it is the first operation
852     * after MRT_INIT. I.e.:
853     *  - there are no vifs installed
854     *  - pim_assert is not enabled
855     *  - the MFC table is empty
856     */
857    if (numvifs > 0) {
858	*apival = 0;
859	return EPERM;
860    }
861    if (pim_assert) {
862	*apival = 0;
863	return EPERM;
864    }
865    for (i = 0; i < MFCTBLSIZ; i++) {
866	if (mfctable[i] != NULL) {
867	    *apival = 0;
868	    return EPERM;
869	}
870    }
871
872    mrt_api_config = *apival & mrt_api_support;
873    *apival = mrt_api_config;
874
875    return 0;
876}
877
878/*
879 * Add a vif to the vif table
880 */
881static int
882add_vif(struct vifctl *vifcp)
883{
884    struct vif *vifp = viftable + vifcp->vifc_vifi;
885    struct sockaddr_in sin = {sizeof sin, AF_INET};
886    struct ifaddr *ifa;
887    struct ifnet *ifp;
888    int error;
889    struct tbf *v_tbf = tbftable + vifcp->vifc_vifi;
890
891    VIF_LOCK();
892    if (vifcp->vifc_vifi >= MAXVIFS) {
893	VIF_UNLOCK();
894	return EINVAL;
895    }
896    if (vifp->v_lcl_addr.s_addr != INADDR_ANY) {
897	VIF_UNLOCK();
898	return EADDRINUSE;
899    }
900    if (vifcp->vifc_lcl_addr.s_addr == INADDR_ANY) {
901	VIF_UNLOCK();
902	return EADDRNOTAVAIL;
903    }
904
905    /* Find the interface with an address in AF_INET family */
906#ifdef PIM
907    if (vifcp->vifc_flags & VIFF_REGISTER) {
908	/*
909	 * XXX: Because VIFF_REGISTER does not really need a valid
910	 * local interface (e.g. it could be 127.0.0.2), we don't
911	 * check its address.
912	 */
913	ifp = NULL;
914    } else
915#endif
916    {
917	sin.sin_addr = vifcp->vifc_lcl_addr;
918	ifa = ifa_ifwithaddr((struct sockaddr *)&sin);
919	if (ifa == NULL) {
920	    VIF_UNLOCK();
921	    return EADDRNOTAVAIL;
922	}
923	ifp = ifa->ifa_ifp;
924    }
925
926    if ((vifcp->vifc_flags & VIFF_TUNNEL) != 0) {
927	log(LOG_ERR, "tunnels are no longer supported\n");
928	VIF_UNLOCK();
929	return EOPNOTSUPP;
930#ifdef PIM
931    } else if (vifcp->vifc_flags & VIFF_REGISTER) {
932	ifp = &multicast_register_if;
933	if (mrtdebug)
934	    log(LOG_DEBUG, "Adding a register vif, ifp: %p\n",
935		    (void *)&multicast_register_if);
936	if (reg_vif_num == VIFI_INVALID) {
937	    if_initname(&multicast_register_if, "register_vif", 0);
938	    multicast_register_if.if_flags = IFF_LOOPBACK;
939	    bzero(&vifp->v_route, sizeof(vifp->v_route));
940	    reg_vif_num = vifcp->vifc_vifi;
941	}
942#endif
943    } else {		/* Make sure the interface supports multicast */
944	if ((ifp->if_flags & IFF_MULTICAST) == 0) {
945	    VIF_UNLOCK();
946	    return EOPNOTSUPP;
947	}
948
949	/* Enable promiscuous reception of all IP multicasts from the if */
950	error = if_allmulti(ifp, 1);
951	if (error) {
952	    VIF_UNLOCK();
953	    return error;
954	}
955    }
956
957    /* define parameters for the tbf structure */
958    vifp->v_tbf = v_tbf;
959    GET_TIME(vifp->v_tbf->tbf_last_pkt_t);
960    vifp->v_tbf->tbf_n_tok = 0;
961    vifp->v_tbf->tbf_q_len = 0;
962    vifp->v_tbf->tbf_max_q_len = MAXQSIZE;
963    vifp->v_tbf->tbf_q = vifp->v_tbf->tbf_t = NULL;
964
965    vifp->v_flags     = vifcp->vifc_flags;
966    vifp->v_threshold = vifcp->vifc_threshold;
967    vifp->v_lcl_addr  = vifcp->vifc_lcl_addr;
968    vifp->v_rmt_addr  = vifcp->vifc_rmt_addr;
969    vifp->v_ifp       = ifp;
970    /* scaling up here allows division by 1024 in critical code */
971    vifp->v_rate_limit= vifcp->vifc_rate_limit * 1024 / 1000;
972    vifp->v_rsvp_on   = 0;
973    vifp->v_rsvpd     = NULL;
974    /* initialize per vif pkt counters */
975    vifp->v_pkt_in    = 0;
976    vifp->v_pkt_out   = 0;
977    vifp->v_bytes_in  = 0;
978    vifp->v_bytes_out = 0;
979
980    /* Adjust numvifs up if the vifi is higher than numvifs */
981    if (numvifs <= vifcp->vifc_vifi) numvifs = vifcp->vifc_vifi + 1;
982
983    VIF_UNLOCK();
984
985    if (mrtdebug)
986	log(LOG_DEBUG, "add_vif #%d, lcladdr %lx, %s %lx, thresh %x, rate %d\n",
987	    vifcp->vifc_vifi,
988	    (u_long)ntohl(vifcp->vifc_lcl_addr.s_addr),
989	    (vifcp->vifc_flags & VIFF_TUNNEL) ? "rmtaddr" : "mask",
990	    (u_long)ntohl(vifcp->vifc_rmt_addr.s_addr),
991	    vifcp->vifc_threshold,
992	    vifcp->vifc_rate_limit);
993
994    return 0;
995}
996
997/*
998 * Delete a vif from the vif table
999 */
1000static int
1001del_vif_locked(vifi_t vifi)
1002{
1003    struct vif *vifp;
1004
1005    VIF_LOCK_ASSERT();
1006
1007    if (vifi >= numvifs) {
1008	return EINVAL;
1009    }
1010    vifp = &viftable[vifi];
1011    if (vifp->v_lcl_addr.s_addr == INADDR_ANY) {
1012	return EADDRNOTAVAIL;
1013    }
1014
1015    if (!(vifp->v_flags & (VIFF_TUNNEL | VIFF_REGISTER)))
1016	if_allmulti(vifp->v_ifp, 0);
1017
1018    /*
1019     * Free packets queued at the interface
1020     */
1021    while (vifp->v_tbf->tbf_q) {
1022	struct mbuf *m = vifp->v_tbf->tbf_q;
1023
1024	vifp->v_tbf->tbf_q = m->m_act;
1025	m_freem(m);
1026    }
1027
1028#ifdef PIM
1029    if (vifp->v_flags & VIFF_REGISTER)
1030	reg_vif_num = VIFI_INVALID;
1031#endif
1032
1033    bzero((caddr_t)vifp->v_tbf, sizeof(*(vifp->v_tbf)));
1034    bzero((caddr_t)vifp, sizeof (*vifp));
1035
1036    if (mrtdebug)
1037	log(LOG_DEBUG, "del_vif %d, numvifs %d\n", vifi, numvifs);
1038
1039    /* Adjust numvifs down */
1040    for (vifi = numvifs; vifi > 0; vifi--)
1041	if (viftable[vifi-1].v_lcl_addr.s_addr != INADDR_ANY)
1042	    break;
1043    numvifs = vifi;
1044
1045    return 0;
1046}
1047
1048static int
1049del_vif(vifi_t vifi)
1050{
1051    int cc;
1052
1053    VIF_LOCK();
1054    cc = del_vif_locked(vifi);
1055    VIF_UNLOCK();
1056
1057    return cc;
1058}
1059
1060/*
1061 * update an mfc entry without resetting counters and S,G addresses.
1062 */
1063static void
1064update_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
1065{
1066    int i;
1067
1068    rt->mfc_parent = mfccp->mfcc_parent;
1069    for (i = 0; i < numvifs; i++) {
1070	rt->mfc_ttls[i] = mfccp->mfcc_ttls[i];
1071	rt->mfc_flags[i] = mfccp->mfcc_flags[i] & mrt_api_config &
1072	    MRT_MFC_FLAGS_ALL;
1073    }
1074    /* set the RP address */
1075    if (mrt_api_config & MRT_MFC_RP)
1076	rt->mfc_rp = mfccp->mfcc_rp;
1077    else
1078	rt->mfc_rp.s_addr = INADDR_ANY;
1079}
1080
1081/*
1082 * fully initialize an mfc entry from the parameter.
1083 */
1084static void
1085init_mfc_params(struct mfc *rt, struct mfcctl2 *mfccp)
1086{
1087    rt->mfc_origin     = mfccp->mfcc_origin;
1088    rt->mfc_mcastgrp   = mfccp->mfcc_mcastgrp;
1089
1090    update_mfc_params(rt, mfccp);
1091
1092    /* initialize pkt counters per src-grp */
1093    rt->mfc_pkt_cnt    = 0;
1094    rt->mfc_byte_cnt   = 0;
1095    rt->mfc_wrong_if   = 0;
1096    rt->mfc_last_assert.tv_sec = rt->mfc_last_assert.tv_usec = 0;
1097}
1098
1099
1100/*
1101 * Add an mfc entry
1102 */
1103static int
1104add_mfc(struct mfcctl2 *mfccp)
1105{
1106    struct mfc *rt;
1107    u_long hash;
1108    struct rtdetq *rte;
1109    u_short nstl;
1110
1111    VIF_LOCK();
1112    MFC_LOCK();
1113
1114    rt = mfc_find(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);
1115
1116    /* If an entry already exists, just update the fields */
1117    if (rt) {
1118	if (mrtdebug & DEBUG_MFC)
1119	    log(LOG_DEBUG,"add_mfc update o %lx g %lx p %x\n",
1120		(u_long)ntohl(mfccp->mfcc_origin.s_addr),
1121		(u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
1122		mfccp->mfcc_parent);
1123
1124	update_mfc_params(rt, mfccp);
1125	MFC_UNLOCK();
1126	VIF_UNLOCK();
1127	return 0;
1128    }
1129
1130    /*
1131     * Find the entry for which the upcall was made and update it.
1132     */
1133    hash = MFCHASH(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr);
1134    for (rt = mfctable[hash], nstl = 0; rt; rt = rt->mfc_next) {
1135
1136	if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
1137		(rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr) &&
1138		(rt->mfc_stall != NULL)) {
1139
1140	    if (nstl++)
1141		log(LOG_ERR, "add_mfc %s o %lx g %lx p %x dbx %p\n",
1142		    "multiple kernel entries",
1143		    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
1144		    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
1145		    mfccp->mfcc_parent, (void *)rt->mfc_stall);
1146
1147	    if (mrtdebug & DEBUG_MFC)
1148		log(LOG_DEBUG,"add_mfc o %lx g %lx p %x dbg %p\n",
1149		    (u_long)ntohl(mfccp->mfcc_origin.s_addr),
1150		    (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
1151		    mfccp->mfcc_parent, (void *)rt->mfc_stall);
1152
1153	    init_mfc_params(rt, mfccp);
1154
1155	    rt->mfc_expire = 0;	/* Don't clean this guy up */
1156	    nexpire[hash]--;
1157
1158	    /* forward, then free, the packets queued while awaiting this entry */
1159	    for (rte = rt->mfc_stall; rte != NULL; ) {
1160		struct rtdetq *n = rte->next;
1161
1162		ip_mdq(rte->m, rte->ifp, rt, -1);
1163		m_freem(rte->m);
1164		free(rte, M_MRTABLE);
1165		rte = n;
1166	    }
1167	    rt->mfc_stall = NULL;
1168	}
1169    }
1170
1171    /*
1172     * It is possible that an entry is being inserted without an upcall
1173     */
1174    if (nstl == 0) {
1175	if (mrtdebug & DEBUG_MFC)
1176	    log(LOG_DEBUG,"add_mfc no upcall h %lu o %lx g %lx p %x\n",
1177		hash, (u_long)ntohl(mfccp->mfcc_origin.s_addr),
1178		(u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr),
1179		mfccp->mfcc_parent);
1180
1181	for (rt = mfctable[hash]; rt != NULL; rt = rt->mfc_next) {
1182	    if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) &&
1183		    (rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr)) {
1184		init_mfc_params(rt, mfccp);
1185		if (rt->mfc_expire)
1186		    nexpire[hash]--;
1187		rt->mfc_expire = 0;
1188		break; /* XXX */
1189	    }
1190	}
1191	if (rt == NULL) {		/* no upcall, so make a new entry */
1192	    rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
1193	    if (rt == NULL) {
1194		MFC_UNLOCK();
1195		VIF_UNLOCK();
1196		return ENOBUFS;
1197	    }
1198
1199	    init_mfc_params(rt, mfccp);
1200	    rt->mfc_expire     = 0;
1201	    rt->mfc_stall      = NULL;
1202
1203	    rt->mfc_bw_meter = NULL;
1204	    /* insert new entry at head of hash chain */
1205	    rt->mfc_next = mfctable[hash];
1206	    mfctable[hash] = rt;
1207	}
1208    }
1209    MFC_UNLOCK();
1210    VIF_UNLOCK();
1211    return 0;
1212}
1213
1214/*
1215 * Delete an mfc entry
1216 */
1217static int
1218del_mfc(struct mfcctl2 *mfccp)
1219{
1220    struct in_addr	origin;
1221    struct in_addr	mcastgrp;
1222    struct mfc		*rt;
1223    struct mfc		**nptr;
1224    u_long		hash;
1225    struct bw_meter	*list;
1226
1227    origin = mfccp->mfcc_origin;
1228    mcastgrp = mfccp->mfcc_mcastgrp;
1229
1230    if (mrtdebug & DEBUG_MFC)
1231	log(LOG_DEBUG,"del_mfc orig %lx mcastgrp %lx\n",
1232	    (u_long)ntohl(origin.s_addr), (u_long)ntohl(mcastgrp.s_addr));
1233
1234    MFC_LOCK();
1235
1236    hash = MFCHASH(origin.s_addr, mcastgrp.s_addr);
1237    for (nptr = &mfctable[hash]; (rt = *nptr) != NULL; nptr = &rt->mfc_next)
1238	if (origin.s_addr == rt->mfc_origin.s_addr &&
1239		mcastgrp.s_addr == rt->mfc_mcastgrp.s_addr &&
1240		rt->mfc_stall == NULL)
1241	    break;
1242    if (rt == NULL) {
1243	MFC_UNLOCK();
1244	return EADDRNOTAVAIL;
1245    }
1246
1247    *nptr = rt->mfc_next;
1248
1249    /*
1250     * free the bw_meter entries
1251     */
1252    list = rt->mfc_bw_meter;
1253    rt->mfc_bw_meter = NULL;
1254
1255    free(rt, M_MRTABLE);
1256
1257    free_bw_list(list);
1258
1259    MFC_UNLOCK();
1260
1261    return 0;
1262}
1263
1264/*
1265 * Send a message to the routing daemon on the multicast routing socket
1266 */
1267static int
1268socket_send(struct socket *s, struct mbuf *mm, struct sockaddr_in *src)
1269{
1270    if (s) {
1271	SOCKBUF_LOCK(&s->so_rcv);
1272	if (sbappendaddr_locked(&s->so_rcv, (struct sockaddr *)src, mm,
1273	    NULL) != 0) {
1274	    sorwakeup_locked(s);
1275	    return 0;
1276	}
1277	SOCKBUF_UNLOCK(&s->so_rcv);
1278    }
1279    m_freem(mm);
1280    return -1;
1281}
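/*
 * Editor's sketch (daemon side, illustrative only): upcalls queued here
 * arrive on the daemon's raw IGMP socket as ordinary datagrams with a
 * struct igmpmsg overlaid on the IP header, roughly:
 *
 *	ssize_t n = recvfrom(s, buf, sizeof(buf), 0, NULL, NULL);
 *	struct igmpmsg *im = (struct igmpmsg *)buf;
 *	if (im->im_mbz == 0 && im->im_msgtype == IGMPMSG_NOCACHE)
 *		... resolve the route and install it with MRT_ADD_MFC ...
 *
 * im_mbz overlays the IP protocol field and is set to zero by the upcall
 * generators in this file, which is how daemons tell upcalls apart from
 * real IGMP packets.
 */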
1282
1283/*
1284 * IP multicast forwarding function. This function assumes that the packet
1285 * pointed to by "ip" has arrived on (or is about to be sent to) the interface
1286 * pointed to by "ifp", and the packet is to be relayed to other networks
1287 * that have members of the packet's destination IP multicast group.
1288 *
1289 * The packet is returned unscathed to the caller, unless it is
1290 * erroneous, in which case a non-zero return value tells the caller to
1291 * discard it.
1292 */
1293
1294#define TUNNEL_LEN  12  /* # bytes of IP option for tunnel encapsulation  */
1295
1296static int
1297X_ip_mforward(struct ip *ip, struct ifnet *ifp, struct mbuf *m,
1298    struct ip_moptions *imo)
1299{
1300    struct mfc *rt;
1301    int error;
1302    vifi_t vifi;
1303
1304    if (mrtdebug & DEBUG_FORWARD)
1305	log(LOG_DEBUG, "ip_mforward: src %lx, dst %lx, ifp %p\n",
1306	    (u_long)ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr),
1307	    (void *)ifp);
1308
1309    if (ip->ip_hl < (sizeof(struct ip) + TUNNEL_LEN) >> 2 ||
1310		((u_char *)(ip + 1))[1] != IPOPT_LSRR ) {
1311	/*
1312	 * Packet arrived via a physical interface or
1313	 * an encapsulated tunnel or a register_vif.
1314	 */
1315    } else {
1316	/*
1317	 * Packet arrived through a source-route tunnel.
1318	 * Source-route tunnels are no longer supported.
1319	 */
1320	static int last_log;
1321	if (last_log != time_uptime) {
1322	    last_log = time_uptime;
1323	    log(LOG_ERR,
1324		"ip_mforward: received source-routed packet from %lx\n",
1325		(u_long)ntohl(ip->ip_src.s_addr));
1326	}
1327	return 1;
1328    }
1329
1330    VIF_LOCK();
1331    MFC_LOCK();
1332    if (imo && ((vifi = imo->imo_multicast_vif) < numvifs)) {
1333	if (ip->ip_ttl < 255)
1334	    ip->ip_ttl++;	/* compensate for -1 in *_send routines */
1335	if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
1336	    struct vif *vifp = viftable + vifi;
1337
1338	    printf("Sending IPPROTO_RSVP from %lx to %lx on vif %d (%s%s)\n",
1339		(long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr),
1340		vifi,
1341		(vifp->v_flags & VIFF_TUNNEL) ? "tunnel on " : "",
1342		vifp->v_ifp->if_xname);
1343	}
1344	error = ip_mdq(m, ifp, NULL, vifi);
1345	MFC_UNLOCK();
1346	VIF_UNLOCK();
1347	return error;
1348    }
1349    if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) {
1350	printf("Warning: IPPROTO_RSVP from %lx to %lx without vif option\n",
1351	    (long)ntohl(ip->ip_src.s_addr), (long)ntohl(ip->ip_dst.s_addr));
1352	if (!imo)
1353	    printf("In fact, no options were specified at all\n");
1354    }
1355
1356    /*
1357     * Don't forward a packet with time-to-live of zero or one,
1358     * or a packet destined to a local-only group.
1359     */
1360    if (ip->ip_ttl <= 1 || ntohl(ip->ip_dst.s_addr) <= INADDR_MAX_LOCAL_GROUP) {
1361	MFC_UNLOCK();
1362	VIF_UNLOCK();
1363	return 0;
1364    }
1365
1366    /*
1367     * Determine forwarding vifs from the forwarding cache table
1368     */
1369    ++mrtstat.mrts_mfc_lookups;
1370    rt = mfc_find(ip->ip_src.s_addr, ip->ip_dst.s_addr);
1371
1372    /* Entry exists, so forward if necessary */
1373    if (rt != NULL) {
1374	error = ip_mdq(m, ifp, rt, -1);
1375	MFC_UNLOCK();
1376	VIF_UNLOCK();
1377	return error;
1378    } else {
1379	/*
1380	 * If we don't have a route for packet's origin,
1381	 * Make a copy of the packet & send message to routing daemon
1382	 */
1383
1384	struct mbuf *mb0;
1385	struct rtdetq *rte;
1386	u_long hash;
1387	int hlen = ip->ip_hl << 2;
1388
1389	++mrtstat.mrts_mfc_misses;
1390
1391	mrtstat.mrts_no_route++;
1392	if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC))
1393	    log(LOG_DEBUG, "ip_mforward: no rte s %lx g %lx\n",
1394		(u_long)ntohl(ip->ip_src.s_addr),
1395		(u_long)ntohl(ip->ip_dst.s_addr));
1396
1397	/*
1398	 * Allocate mbufs early so that we don't do extra work if we are
1399	 * just going to fail anyway.  Make sure to pullup the header so
1400	 * that other people can't step on it.
1401	 */
1402	rte = (struct rtdetq *)malloc((sizeof *rte), M_MRTABLE, M_NOWAIT);
1403	if (rte == NULL) {
1404	    MFC_UNLOCK();
1405	    VIF_UNLOCK();
1406	    return ENOBUFS;
1407	}
1408	mb0 = m_copypacket(m, M_DONTWAIT);
1409	if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen))
1410	    mb0 = m_pullup(mb0, hlen);
1411	if (mb0 == NULL) {
1412	    free(rte, M_MRTABLE);
1413	    MFC_UNLOCK();
1414	    VIF_UNLOCK();
1415	    return ENOBUFS;
1416	}
1417
1418	/* is there an upcall waiting for this flow ? */
1419	hash = MFCHASH(ip->ip_src.s_addr, ip->ip_dst.s_addr);
1420	for (rt = mfctable[hash]; rt; rt = rt->mfc_next) {
1421	    if ((ip->ip_src.s_addr == rt->mfc_origin.s_addr) &&
1422		    (ip->ip_dst.s_addr == rt->mfc_mcastgrp.s_addr) &&
1423		    (rt->mfc_stall != NULL))
1424		break;
1425	}
1426
1427	if (rt == NULL) {
1428	    int i;
1429	    struct igmpmsg *im;
1430	    struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
1431	    struct mbuf *mm;
1432
1433	    /*
1434	     * Locate the vifi for the incoming interface for this packet.
1435	     * If none found, drop packet.
1436	     */
1437	    for (vifi=0; vifi < numvifs && viftable[vifi].v_ifp != ifp; vifi++)
1438		;
1439	    if (vifi >= numvifs)	/* vif not found, drop packet */
1440		goto non_fatal;
1441
1442	    /* no upcall, so make a new entry */
1443	    rt = (struct mfc *)malloc(sizeof(*rt), M_MRTABLE, M_NOWAIT);
1444	    if (rt == NULL)
1445		goto fail;
1446	    /* Make a copy of the header to send to the user level process */
1447	    mm = m_copy(mb0, 0, hlen);
1448	    if (mm == NULL)
1449		goto fail1;
1450
1451	    /*
1452	     * Send message to routing daemon to install
1453	     * a route into the kernel table
1454	     */
1455
1456	    im = mtod(mm, struct igmpmsg *);
1457	    im->im_msgtype = IGMPMSG_NOCACHE;
1458	    im->im_mbz = 0;
1459	    im->im_vif = vifi;
1460
1461	    mrtstat.mrts_upcalls++;
1462
1463	    k_igmpsrc.sin_addr = ip->ip_src;
1464	    if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
1465		log(LOG_WARNING, "ip_mforward: ip_mrouter socket queue full\n");
1466		++mrtstat.mrts_upq_sockfull;
1467fail1:
1468		free(rt, M_MRTABLE);
1469fail:
1470		free(rte, M_MRTABLE);
1471		m_freem(mb0);
1472		MFC_UNLOCK();
1473		VIF_UNLOCK();
1474		return ENOBUFS;
1475	    }
1476
1477	    /* insert new entry at head of hash chain */
1478	    rt->mfc_origin.s_addr     = ip->ip_src.s_addr;
1479	    rt->mfc_mcastgrp.s_addr   = ip->ip_dst.s_addr;
1480	    rt->mfc_expire	      = UPCALL_EXPIRE;
1481	    nexpire[hash]++;
1482	    for (i = 0; i < numvifs; i++) {
1483		rt->mfc_ttls[i] = 0;
1484		rt->mfc_flags[i] = 0;
1485	    }
1486	    rt->mfc_parent = -1;
1487
1488	    rt->mfc_rp.s_addr = INADDR_ANY; /* clear the RP address */
1489
1490	    rt->mfc_bw_meter = NULL;
1491
1492	    /* link into table */
1493	    rt->mfc_next   = mfctable[hash];
1494	    mfctable[hash] = rt;
1495	    rt->mfc_stall = rte;
1496
1497	} else {
1498	    /* determine if q has overflowed */
1499	    int npkts = 0;
1500	    struct rtdetq **p;
1501
1502	    /*
1503	     * XXX ouch! we need to append to the list, but we
1504	     * only have a pointer to the front, so we have to
1505	     * scan the entire list every time.
1506	     */
1507	    for (p = &rt->mfc_stall; *p != NULL; p = &(*p)->next)
1508		npkts++;
1509
1510	    if (npkts > MAX_UPQ) {
1511		mrtstat.mrts_upq_ovflw++;
1512non_fatal:
1513		free(rte, M_MRTABLE);
1514		m_freem(mb0);
1515		MFC_UNLOCK();
1516		VIF_UNLOCK();
1517		return 0;
1518	    }
1519
1520	    /* Add this entry to the end of the queue */
1521	    *p = rte;
1522	}
1523
1524	rte->m			= mb0;
1525	rte->ifp		= ifp;
1526	rte->next		= NULL;
1527
1528	MFC_UNLOCK();
1529	VIF_UNLOCK();
1530
1531	return 0;
1532    }
1533}
1534
1535/*
1536 * Clean up cache entries whose upcalls have not been serviced
1537 */
1538static void
1539expire_upcalls(void *unused)
1540{
1541    struct rtdetq *rte;
1542    struct mfc *mfc, **nptr;
1543    int i;
1544
1545    MFC_LOCK();
1546    for (i = 0; i < MFCTBLSIZ; i++) {
1547	if (nexpire[i] == 0)
1548	    continue;
1549	nptr = &mfctable[i];
1550	for (mfc = *nptr; mfc != NULL; mfc = *nptr) {
1551	    /*
1552	     * Skip real cache entries.
1553	     * Make sure it wasn't marked to not expire (shouldn't happen),
1554	     * and only act if it expires now.
1555	     */
1556	    if (mfc->mfc_stall != NULL && mfc->mfc_expire != 0 &&
1557		    --mfc->mfc_expire == 0) {
1558		if (mrtdebug & DEBUG_EXPIRE)
1559		    log(LOG_DEBUG, "expire_upcalls: expiring (%lx %lx)\n",
1560			(u_long)ntohl(mfc->mfc_origin.s_addr),
1561			(u_long)ntohl(mfc->mfc_mcastgrp.s_addr));
1562		/*
1563		 * drop all the packets
1564		 * free the mbuf with the pkt, if, timing info
1565		 */
1566		for (rte = mfc->mfc_stall; rte; ) {
1567		    struct rtdetq *n = rte->next;
1568
1569		    m_freem(rte->m);
1570		    free(rte, M_MRTABLE);
1571		    rte = n;
1572		}
1573		++mrtstat.mrts_cache_cleanups;
1574		nexpire[i]--;
1575
1576		/*
1577		 * free the bw_meter entries
1578		 */
1579		while (mfc->mfc_bw_meter != NULL) {
1580		    struct bw_meter *x = mfc->mfc_bw_meter;
1581
1582		    mfc->mfc_bw_meter = x->bm_mfc_next;
1583		    free(x, M_BWMETER);
1584		}
1585
1586		*nptr = mfc->mfc_next;
1587		free(mfc, M_MRTABLE);
1588	    } else {
1589		nptr = &mfc->mfc_next;
1590	    }
1591	}
1592    }
1593    MFC_UNLOCK();
1594
1595    callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT, expire_upcalls, NULL);
1596}
1597
1598/*
1599 * Packet forwarding routine, called once an entry in the cache has been made
1600 */
1601static int
1602ip_mdq(struct mbuf *m, struct ifnet *ifp, struct mfc *rt, vifi_t xmt_vif)
1603{
1604    struct ip  *ip = mtod(m, struct ip *);
1605    vifi_t vifi;
1606    int plen = ip->ip_len;
1607
1608    VIF_LOCK_ASSERT();
1609/*
1610 * Macro to send packet on vif.  Since RSVP packets don't get counted on
1611 * input, they shouldn't get counted on output, so statistics keeping is
1612 * separate.
1613 */
1614#define MC_SEND(ip,vifp,m) {				\
1615		if (((vifp)->v_flags & VIFF_TUNNEL) == 0)	\
1616		    phyint_send((ip), (vifp), (m));	\
1617}
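    /*
     * Editor's note: because add_vif() rejects VIFF_TUNNEL vifs (tunnels
     * are no longer supported), the VIFF_TUNNEL test above can never be
     * true and MC_SEND() always reduces to phyint_send().
     */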
1618
1619    /*
1620     * If xmt_vif is not -1, send on only the requested vif.
1621     *
1622     * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.)
1623     */
1624    if (xmt_vif < numvifs) {
1625#ifdef PIM
1626	if (viftable[xmt_vif].v_flags & VIFF_REGISTER)
1627	    pim_register_send(ip, viftable + xmt_vif, m, rt);
1628	else
1629#endif
1630	MC_SEND(ip, viftable + xmt_vif, m);
1631	return 1;
1632    }
1633
1634    /*
1635     * Don't forward if it didn't arrive from the parent vif for its origin.
1636     */
1637    vifi = rt->mfc_parent;
1638    if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) {
1639	/* came in the wrong interface */
1640	if (mrtdebug & DEBUG_FORWARD)
1641	    log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n",
1642		(void *)ifp, vifi, (void *)viftable[vifi].v_ifp);
1643	++mrtstat.mrts_wrong_if;
1644	++rt->mfc_wrong_if;
1645	/*
1646	 * If we are doing PIM assert processing, send a message
1647	 * to the routing daemon.
1648	 *
1649	 * XXX: A PIM-SM router needs the WRONGVIF detection so it
1650	 * can complete the SPT switch, regardless of the type
1651	 * of the iif (broadcast media, GRE tunnel, etc).
1652	 */
1653	if (pim_assert && (vifi < numvifs) && viftable[vifi].v_ifp) {
1654	    struct timeval now;
1655	    u_long delta;
1656
1657#ifdef PIM
1658	    if (ifp == &multicast_register_if)
1659		pimstat.pims_rcv_registers_wrongiif++;
1660#endif
1661
1662	    /* Get vifi for the incoming packet */
1663	    for (vifi=0; vifi < numvifs && viftable[vifi].v_ifp != ifp; vifi++)
1664		;
1665	    if (vifi >= numvifs)
1666		return 0;	/* The iif is not found: ignore the packet. */
1667
1668	    if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_DISABLE_WRONGVIF)
1669		return 0;	/* WRONGVIF disabled: ignore the packet */
1670
1671	    GET_TIME(now);
1672
1673	    TV_DELTA(now, rt->mfc_last_assert, delta);
1674
1675	    if (delta > ASSERT_MSG_TIME) {
1676		struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
1677		struct igmpmsg *im;
1678		int hlen = ip->ip_hl << 2;
1679		struct mbuf *mm = m_copy(m, 0, hlen);
1680
1681		if (mm && (M_HASCL(mm) || mm->m_len < hlen))
1682		    mm = m_pullup(mm, hlen);
1683		if (mm == NULL)
1684		    return ENOBUFS;
1685
1686		rt->mfc_last_assert = now;
1687
1688		im = mtod(mm, struct igmpmsg *);
1689		im->im_msgtype	= IGMPMSG_WRONGVIF;
1690		im->im_mbz		= 0;
1691		im->im_vif		= vifi;
1692
1693		mrtstat.mrts_upcalls++;
1694
1695		k_igmpsrc.sin_addr = im->im_src;
1696		if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) {
1697		    log(LOG_WARNING,
1698			"ip_mforward: ip_mrouter socket queue full\n");
1699		    ++mrtstat.mrts_upq_sockfull;
1700		    return ENOBUFS;
1701		}
1702	    }
1703	}
1704	return 0;
1705    }
1706
1707    /* If I sourced this packet, it counts as output, else it was input. */
1708    if (ip->ip_src.s_addr == viftable[vifi].v_lcl_addr.s_addr) {
1709	viftable[vifi].v_pkt_out++;
1710	viftable[vifi].v_bytes_out += plen;
1711    } else {
1712	viftable[vifi].v_pkt_in++;
1713	viftable[vifi].v_bytes_in += plen;
1714    }
1715    rt->mfc_pkt_cnt++;
1716    rt->mfc_byte_cnt += plen;
1717
1718    /*
1719     * For each vif, decide if a copy of the packet should be forwarded.
1720     * Forward if:
1721     *		- the ttl exceeds the vif's threshold
1722     *		- there are group members downstream on interface
1723     */
1724    for (vifi = 0; vifi < numvifs; vifi++)
1725	if ((rt->mfc_ttls[vifi] > 0) && (ip->ip_ttl > rt->mfc_ttls[vifi])) {
1726	    viftable[vifi].v_pkt_out++;
1727	    viftable[vifi].v_bytes_out += plen;
1728#ifdef PIM
1729	    if (viftable[vifi].v_flags & VIFF_REGISTER)
1730		pim_register_send(ip, viftable + vifi, m, rt);
1731	    else
1732#endif
1733	    MC_SEND(ip, viftable+vifi, m);
1734	}
1735
1736    /*
1737     * Perform upcall-related bw measuring.
1738     */
1739    if (rt->mfc_bw_meter != NULL) {
1740	struct bw_meter *x;
1741	struct timeval now;
1742
1743	GET_TIME(now);
1744	MFC_LOCK_ASSERT();
1745	for (x = rt->mfc_bw_meter; x != NULL; x = x->bm_mfc_next)
1746	    bw_meter_receive_packet(x, plen, &now);
1747    }
1748
1749    return 0;
1750}
1751
1752/*
1753 * check if a vif number is legal/ok. This is used by ip_output.
1754 */
1755static int
1756X_legal_vif_num(int vif)
1757{
1758    /* XXX unlocked, matter? */
1759    return (vif >= 0 && vif < numvifs);
1760}
1761
1762/*
1763 * Return the local address used by this vif
1764 */
1765static u_long
1766X_ip_mcast_src(int vifi)
1767{
1768    /* XXX unlocked, matter? */
1769    if (vifi >= 0 && vifi < numvifs)
1770	return viftable[vifi].v_lcl_addr.s_addr;
1771    else
1772	return INADDR_ANY;
1773}
1774
1775static void
1776phyint_send(struct ip *ip, struct vif *vifp, struct mbuf *m)
1777{
1778    struct mbuf *mb_copy;
1779    int hlen = ip->ip_hl << 2;
1780
1781    VIF_LOCK_ASSERT();
1782
1783    /*
1784     * Make a new reference to the packet; make sure that
1785     * the IP header is actually copied, not just referenced,
1786     * so that ip_output() only scribbles on the copy.
1787     */
1788    mb_copy = m_copypacket(m, M_DONTWAIT);
1789    if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen))
1790	mb_copy = m_pullup(mb_copy, hlen);
1791    if (mb_copy == NULL)
1792	return;
1793
1794    if (vifp->v_rate_limit == 0)
1795	tbf_send_packet(vifp, mb_copy);
1796    else
1797	tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *), ip->ip_len);
1798}
1799
1800/*
1801 * Token bucket filter module
1802 */
1803
1804static void
1805tbf_control(struct vif *vifp, struct mbuf *m, struct ip *ip, u_long p_len)
1806{
1807    struct tbf *t = vifp->v_tbf;
1808
1809    VIF_LOCK_ASSERT();
1810
1811    if (p_len > MAX_BKT_SIZE) {		/* drop if packet is too large */
1812	mrtstat.mrts_pkt2large++;
1813	m_freem(m);
1814	return;
1815    }
1816
1817    tbf_update_tokens(vifp);
1818
1819    if (t->tbf_q_len == 0) {		/* queue empty...		*/
1820	if (p_len <= t->tbf_n_tok) {	/* send packet if enough tokens */
1821	    t->tbf_n_tok -= p_len;
1822	    tbf_send_packet(vifp, m);
1823	} else {			/* no, queue packet and try later */
1824	    tbf_queue(vifp, m);
1825	    callout_reset(&tbf_reprocess_ch, TBF_REPROCESS,
1826		tbf_reprocess_q, vifp);
1827	}
1828    } else if (t->tbf_q_len < t->tbf_max_q_len) {
1829	/* finite queue length, so queue pkts and process queue */
1830	tbf_queue(vifp, m);
1831	tbf_process_q(vifp);
1832    } else {
1833	/* queue full, try to dq and queue and process */
1834	if (!tbf_dq_sel(vifp, ip)) {
1835	    mrtstat.mrts_q_overflow++;
1836	    m_freem(m);
1837	} else {
1838	    tbf_queue(vifp, m);
1839	    tbf_process_q(vifp);
1840	}
1841    }
1842}
1843
1844/*
1845 * adds a packet to the queue at the interface
1846 */
1847static void
1848tbf_queue(struct vif *vifp, struct mbuf *m)
1849{
1850    struct tbf *t = vifp->v_tbf;
1851
1852    VIF_LOCK_ASSERT();
1853
1854    if (t->tbf_t == NULL)	/* Queue was empty */
1855	t->tbf_q = m;
1856    else			/* Insert at tail */
1857	t->tbf_t->m_act = m;
1858
1859    t->tbf_t = m;		/* Set new tail pointer */
1860
1861#ifdef DIAGNOSTIC
1862    /* Make sure we didn't get fed a bogus mbuf */
1863    if (m->m_act)
1864	panic("tbf_queue: m_act");
1865#endif
1866    m->m_act = NULL;
1867
1868    t->tbf_q_len++;
1869}
1870
1871/*
1872 * processes the queue at the interface
1873 */
1874static void
1875tbf_process_q(struct vif *vifp)
1876{
1877    struct tbf *t = vifp->v_tbf;
1878
1879    VIF_LOCK_ASSERT();
1880
1881    /* loop through the queue at the interface and send as many packets
1882     * as possible
1883     */
1884    while (t->tbf_q_len > 0) {
1885	struct mbuf *m = t->tbf_q;
1886	int len = mtod(m, struct ip *)->ip_len;
1887
1888	/* determine if the packet can be sent */
1889	if (len > t->tbf_n_tok)	/* not enough tokens, we are done */
1890	    break;
1891	/* ok, reduce no of tokens, dequeue and send the packet. */
1892	t->tbf_n_tok -= len;
1893
1894	t->tbf_q = m->m_act;
1895	if (--t->tbf_q_len == 0)
1896	    t->tbf_t = NULL;
1897
1898	m->m_act = NULL;
1899	tbf_send_packet(vifp, m);
1900    }
1901}
1902
1903static void
1904tbf_reprocess_q(void *xvifp)
1905{
1906    struct vif *vifp = xvifp;
1907
1908    if (ip_mrouter == NULL)
1909	return;
1910    VIF_LOCK();
1911    tbf_update_tokens(vifp);
1912    tbf_process_q(vifp);
1913    if (vifp->v_tbf->tbf_q_len)
1914	callout_reset(&tbf_reprocess_ch, TBF_REPROCESS, tbf_reprocess_q, vifp);
1915    VIF_UNLOCK();
1916}
1917
1918/* Selectively discard a member of the queue, based on the precedence
1919 * value and the priority of the incoming packet.
1920 */
1921static int
1922tbf_dq_sel(struct vif *vifp, struct ip *ip)
1923{
1924    u_int p;
1925    struct mbuf *m, *last;
1926    struct mbuf **np;
1927    struct tbf *t = vifp->v_tbf;
1928
1929    VIF_LOCK_ASSERT();
1930
1931    p = priority(vifp, ip);
1932
1933    np = &t->tbf_q;
1934    last = NULL;
1935    while ((m = *np) != NULL) {
1936	if (p > priority(vifp, mtod(m, struct ip *))) {
1937	    *np = m->m_act;
1938	    /* If we're removing the last packet, fix the tail pointer */
1939	    if (m == t->tbf_t)
1940		t->tbf_t = last;
1941	    m_freem(m);
1942	    /* It's impossible for the queue to be empty, but check anyway. */
1943	    if (--t->tbf_q_len == 0)
1944		t->tbf_t = NULL;
1945	    mrtstat.mrts_drop_sel++;
1946	    return 1;
1947	}
1948	np = &m->m_act;
1949	last = m;
1950    }
1951    return 0;
1952}
1953
1954static void
1955tbf_send_packet(struct vif *vifp, struct mbuf *m)
1956{
1957    VIF_LOCK_ASSERT();
1958
1959    if ((vifp->v_flags & VIFF_TUNNEL) == 0) {
1960	struct ip_moptions imo;
1961	struct in_multi *imm[2];
1962	int error;
1963	static struct route ro; /* XXX check this */
1964
1965	imo.imo_multicast_ifp  = vifp->v_ifp;
1966	imo.imo_multicast_ttl  = mtod(m, struct ip *)->ip_ttl - 1;
1967	imo.imo_multicast_loop = 1;
1968	imo.imo_multicast_vif  = -1;
1969	imo.imo_num_memberships = 0;
1970	imo.imo_max_memberships = 2;
1971	imo.imo_membership  = &imm[0];
1972
1973	/*
1974	 * Re-entrancy should not be a problem here: packets that we send
1975	 * out and that are looped back to us should be rejected, since
1976	 * they appear to come from the loopback interface, thus
1977	 * preventing looping.
1978	 */
1979	error = ip_output(m, NULL, &ro, IP_FORWARDING, &imo, NULL);
1980
1981	if (mrtdebug & DEBUG_XMIT)
1982	    log(LOG_DEBUG, "phyint_send on vif %td err %d\n",
1983		vifp - viftable, error);
1984    }
1985}
1986
1987/* Determine the current time, compute the time elapsed since the last
1988 * update (in microseconds), and use it to update the number of tokens
1989 * in the bucket.
1990 */
1991static void
1992tbf_update_tokens(struct vif *vifp)
1993{
1994    struct timeval tp;
1995    u_long tm;
1996    struct tbf *t = vifp->v_tbf;
1997
1998    VIF_LOCK_ASSERT();
1999
2000    GET_TIME(tp);
2001
2002    TV_DELTA(tp, t->tbf_last_pkt_t, tm);
2003
2004    /*
2005     * This formula is actually
2006     * "time in seconds" * "bytes/second".
2007     *
2008     * (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8)
2009     *
2010     * The (1000/1024) was introduced in add_vif to optimize
2011     * this divide into a shift.
2012     */
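    /*
     * Worked example (hypothetical numbers, for illustration): with
     * v_rate_limit = 1000 (1000 kbits/s) and tm = 100000 usec of elapsed
     * time, the increment below is
     *     100000 * 1000 / 1024 / 8 = 12207 tokens (bytes),
     * versus the exact 12500 bytes that 0.1 sec at 125000 bytes/s would
     * allow; the ~2.3% shortfall is the cost of turning the 1000/1024
     * factor into a shift.
     */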
2013    t->tbf_n_tok += tm * vifp->v_rate_limit / 1024 / 8;
2014    t->tbf_last_pkt_t = tp;
2015
2016    if (t->tbf_n_tok > MAX_BKT_SIZE)
2017	t->tbf_n_tok = MAX_BKT_SIZE;
2018}
2019
2020static int
2021priority(struct vif *vifp, struct ip *ip)
2022{
2023    int prio = 50; /* the lowest priority -- default case */
2024
2025    /* temporary hack; may add general packet classifier some day */
2026
2027    /*
2028     * The UDP port space is divided up into four priority ranges:
2029     * [0, 16384)     : unclassified - lowest priority
2030     * [16384, 32768) : audio - highest priority
2031     * [32768, 49152) : whiteboard - medium priority
2032     * [49152, 65536) : video - low priority
2033     *
2034     * Everything else gets lowest priority.
2035     */
2036    if (ip->ip_p == IPPROTO_UDP) {
2037	struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2));
2038	switch (ntohs(udp->uh_dport) & 0xc000) {
2039	case 0x4000:
2040	    prio = 70;
2041	    break;
2042	case 0x8000:
2043	    prio = 60;
2044	    break;
2045	case 0xc000:
2046	    prio = 55;
2047	    break;
2048	}
2049    }
2050    return prio;
2051}
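
/*
 * Example classification (for illustration): a UDP packet with
 * destination port 16384 (0x4000) gets priority 70 (audio), port 32768
 * (0x8000) gets 60 (whiteboard), port 49152 (0xc000) gets 55 (video),
 * and any port below 16384 -- or any non-UDP packet -- keeps the default
 * of 50.  Only the top two bits of the destination port are examined.
 */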
2052
2053/*
2054 * End of token bucket filter modifications
2055 */
2056
2057static int
2058X_ip_rsvp_vif(struct socket *so, struct sockopt *sopt)
2059{
2060    int error, vifi;
2061
2062    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP)
2063	return EOPNOTSUPP;
2064
2065    error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi);
2066    if (error)
2067	return error;
2068
2069    VIF_LOCK();
2070
2071    if (vifi < 0 || vifi >= numvifs) {	/* Error if vif is invalid */
2072	VIF_UNLOCK();
2073	return EADDRNOTAVAIL;
2074    }
2075
2076    if (sopt->sopt_name == IP_RSVP_VIF_ON) {
2077	/* Check if socket is available. */
2078	if (viftable[vifi].v_rsvpd != NULL) {
2079	    VIF_UNLOCK();
2080	    return EADDRINUSE;
2081	}
2082
2083	viftable[vifi].v_rsvpd = so;
2084	/* This may seem silly, but we need to be sure we don't over-increment
2085	 * the RSVP counter, in case something slips up.
2086	 */
2087	if (!viftable[vifi].v_rsvp_on) {
2088	    viftable[vifi].v_rsvp_on = 1;
2089	    rsvp_on++;
2090	}
2091    } else { /* must be VIF_OFF */
2092	/*
2093	 * XXX as an additional consistency check, one could make sure
2094	 * that viftable[vifi].v_rsvpd == so, otherwise passing so as
2095	 * first parameter is pretty useless.
2096	 */
2097	viftable[vifi].v_rsvpd = NULL;
2098	/*
2099	 * This may seem silly, but we need to be sure we don't over-decrement
2100	 * the RSVP counter, in case something slips up.
2101	 */
2102	if (viftable[vifi].v_rsvp_on) {
2103	    viftable[vifi].v_rsvp_on = 0;
2104	    rsvp_on--;
2105	}
2106    }
2107    VIF_UNLOCK();
2108    return 0;
2109}
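
/*
 * The handler above is driven from user space by an RSVP daemon that owns
 * a raw IPPROTO_RSVP socket.  A minimal caller-side sketch (user-space
 * code, for illustration only; the socket "s" and vif index "vifi" are
 * hypothetical, and the option is assumed to be set at the IPPROTO_IP
 * level):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int vifi = 0;
 *	setsockopt(s, IPPROTO_IP, IP_RSVP_VIF_ON, &vifi, sizeof(vifi));
 *	... RSVP packets arriving on that vif are now delivered to s ...
 *	setsockopt(s, IPPROTO_IP, IP_RSVP_VIF_OFF, &vifi, sizeof(vifi));
 */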
2110
2111static void
2112X_ip_rsvp_force_done(struct socket *so)
2113{
2114    int vifi;
2115
2116    /* Don't bother if it is not the right type of socket. */
2117    if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP)
2118	return;
2119
2120    VIF_LOCK();
2121
2122    /* The socket may be attached to more than one vif...this
2123     * is perfectly legal.
2124     */
2125    for (vifi = 0; vifi < numvifs; vifi++) {
2126	if (viftable[vifi].v_rsvpd == so) {
2127	    viftable[vifi].v_rsvpd = NULL;
2128	    /* This may seem silly, but we need to be sure we don't
2129	     * over-decrement the RSVP counter, in case something slips up.
2130	     */
2131	    if (viftable[vifi].v_rsvp_on) {
2132		viftable[vifi].v_rsvp_on = 0;
2133		rsvp_on--;
2134	    }
2135	}
2136    }
2137
2138    VIF_UNLOCK();
2139}
2140
2141static void
2142X_rsvp_input(struct mbuf *m, int off)
2143{
2144    int vifi;
2145    struct ip *ip = mtod(m, struct ip *);
2146    struct sockaddr_in rsvp_src = { sizeof rsvp_src, AF_INET };
2147    struct ifnet *ifp;
2148
2149    if (rsvpdebug)
2150	printf("rsvp_input: rsvp_on %d\n",rsvp_on);
2151
2152    /* Can still get packets with rsvp_on = 0 if there is a local member
2153     * of the group to which the RSVP packet is addressed.  But in this
2154     * case we want to throw the packet away.
2155     */
2156    if (!rsvp_on) {
2157	m_freem(m);
2158	return;
2159    }
2160
2161    if (rsvpdebug)
2162	printf("rsvp_input: check vifs\n");
2163
2164#ifdef DIAGNOSTIC
2165    M_ASSERTPKTHDR(m);
2166#endif
2167
2168    ifp = m->m_pkthdr.rcvif;
2169
2170    VIF_LOCK();
2171    /* Find which vif the packet arrived on. */
2172    for (vifi = 0; vifi < numvifs; vifi++)
2173	if (viftable[vifi].v_ifp == ifp)
2174	    break;
2175
2176    if (vifi == numvifs || viftable[vifi].v_rsvpd == NULL) {
2177	/*
2178	 * Drop the lock here to avoid holding it across rip_input.
2179	 * This could make rsvpdebug printfs wrong.  If you care,
2180	 * record the state of stuff before dropping the lock.
2181	 */
2182	VIF_UNLOCK();
2183	/*
2184	 * If the old-style non-vif-associated socket is set,
2185	 * then use it.  Otherwise, drop packet since there
2186	 * is no specific socket for this vif.
2187	 */
2188	if (ip_rsvpd != NULL) {
2189	    if (rsvpdebug)
2190		printf("rsvp_input: Sending packet up old-style socket\n");
2191	    rip_input(m, off);  /* xxx */
2192	} else {
2193	    if (rsvpdebug && vifi == numvifs)
2194		printf("rsvp_input: Can't find vif for packet.\n");
2195	    else if (rsvpdebug && viftable[vifi].v_rsvpd == NULL)
2196		printf("rsvp_input: No socket defined for vif %d\n",vifi);
2197	    m_freem(m);
2198	}
2199	return;
2200    }
2201    rsvp_src.sin_addr = ip->ip_src;
2202
2203    if (rsvpdebug && m)
2204	printf("rsvp_input: m->m_len = %d, sbspace() = %ld\n",
2205	       m->m_len,sbspace(&(viftable[vifi].v_rsvpd->so_rcv)));
2206
2207    if (socket_send(viftable[vifi].v_rsvpd, m, &rsvp_src) < 0) {
2208	if (rsvpdebug)
2209	    printf("rsvp_input: Failed to append to socket\n");
2210    } else {
2211	if (rsvpdebug)
2212	    printf("rsvp_input: send packet up\n");
2213    }
2214    VIF_UNLOCK();
2215}
2216
2217/*
2218 * Code for bandwidth monitors
2219 */
2220
2221/*
2222 * Define common interface for timeval-related methods
2223 */
2224#define	BW_TIMEVALCMP(tvp, uvp, cmp) timevalcmp((tvp), (uvp), cmp)
2225#define	BW_TIMEVALDECR(vvp, uvp) timevalsub((vvp), (uvp))
2226#define	BW_TIMEVALADD(vvp, uvp) timevaladd((vvp), (uvp))
2227
2228static uint32_t
2229compute_bw_meter_flags(struct bw_upcall *req)
2230{
2231    uint32_t flags = 0;
2232
2233    if (req->bu_flags & BW_UPCALL_UNIT_PACKETS)
2234	flags |= BW_METER_UNIT_PACKETS;
2235    if (req->bu_flags & BW_UPCALL_UNIT_BYTES)
2236	flags |= BW_METER_UNIT_BYTES;
2237    if (req->bu_flags & BW_UPCALL_GEQ)
2238	flags |= BW_METER_GEQ;
2239    if (req->bu_flags & BW_UPCALL_LEQ)
2240	flags |= BW_METER_LEQ;
2241
2242    return flags;
2243}
2244
2245/*
2246 * Add a bw_meter entry
2247 */
2248static int
2249add_bw_upcall(struct bw_upcall *req)
2250{
2251    struct mfc *mfc;
2252    struct timeval delta = { BW_UPCALL_THRESHOLD_INTERVAL_MIN_SEC,
2253		BW_UPCALL_THRESHOLD_INTERVAL_MIN_USEC };
2254    struct timeval now;
2255    struct bw_meter *x;
2256    uint32_t flags;
2257
2258    if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
2259	return EOPNOTSUPP;
2260
2261    /* Test if the flags are valid */
2262    if (!(req->bu_flags & (BW_UPCALL_UNIT_PACKETS | BW_UPCALL_UNIT_BYTES)))
2263	return EINVAL;
2264    if (!(req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ)))
2265	return EINVAL;
2266    if ((req->bu_flags & (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
2267	    == (BW_UPCALL_GEQ | BW_UPCALL_LEQ))
2268	return EINVAL;
2269
2270    /* Test if the threshold time interval is valid */
2271    if (BW_TIMEVALCMP(&req->bu_threshold.b_time, &delta, <))
2272	return EINVAL;
2273
2274    flags = compute_bw_meter_flags(req);
2275
2276    /*
2277     * Check whether an identical bw_meter entry is already installed
2278     */
2279    MFC_LOCK();
2280    mfc = mfc_find(req->bu_src.s_addr, req->bu_dst.s_addr);
2281    if (mfc == NULL) {
2282	MFC_UNLOCK();
2283	return EADDRNOTAVAIL;
2284    }
2285    for (x = mfc->mfc_bw_meter; x != NULL; x = x->bm_mfc_next) {
2286	if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
2287			   &req->bu_threshold.b_time, ==)) &&
2288	    (x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
2289	    (x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
2290	    (x->bm_flags & BW_METER_USER_FLAGS) == flags)  {
2291	    MFC_UNLOCK();
2292	    return 0;		/* XXX Already installed */
2293	}
2294    }
2295
2296    /* Allocate the new bw_meter entry */
2297    x = (struct bw_meter *)malloc(sizeof(*x), M_BWMETER, M_NOWAIT);
2298    if (x == NULL) {
2299	MFC_UNLOCK();
2300	return ENOBUFS;
2301    }
2302
2303    /* Set the new bw_meter entry */
2304    x->bm_threshold.b_time = req->bu_threshold.b_time;
2305    GET_TIME(now);
2306    x->bm_start_time = now;
2307    x->bm_threshold.b_packets = req->bu_threshold.b_packets;
2308    x->bm_threshold.b_bytes = req->bu_threshold.b_bytes;
2309    x->bm_measured.b_packets = 0;
2310    x->bm_measured.b_bytes = 0;
2311    x->bm_flags = flags;
2312    x->bm_time_next = NULL;
2313    x->bm_time_hash = BW_METER_BUCKETS;
2314
2315    /* Add the new bw_meter entry to the front of entries for this MFC */
2316    x->bm_mfc = mfc;
2317    x->bm_mfc_next = mfc->mfc_bw_meter;
2318    mfc->mfc_bw_meter = x;
2319    schedule_bw_meter(x, &now);
2320    MFC_UNLOCK();
2321
2322    return 0;
2323}
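
/*
 * add_bw_upcall() is reached via the MRT_ADD_BW_UPCALL socket option on
 * the multicast routing socket; it requires that the MFC entry for the
 * (source, group) pair already exists and that the MRT_MFC_BW_UPCALL bit
 * is set in mrt_api_config.  A minimal user-space sketch (for
 * illustration only; the socket "mrouter_s", the addresses and the
 * threshold values are hypothetical):
 *
 *	struct bw_upcall req;
 *	memset(&req, 0, sizeof(req));
 *	req.bu_src.s_addr = inet_addr("10.1.1.1");
 *	req.bu_dst.s_addr = inet_addr("239.1.1.1");
 *	req.bu_flags = BW_UPCALL_UNIT_BYTES | BW_UPCALL_GEQ;
 *	req.bu_threshold.b_time.tv_sec = 3;
 *	req.bu_threshold.b_bytes = 100000;
 *	setsockopt(mrouter_s, IPPROTO_IP, MRT_ADD_BW_UPCALL,
 *	    &req, sizeof(req));
 *
 * An IGMPMSG_BW_UPCALL message is later read from the same socket when
 * the measured traffic crosses the configured threshold.
 */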
2324
2325static void
2326free_bw_list(struct bw_meter *list)
2327{
2328    while (list != NULL) {
2329	struct bw_meter *x = list;
2330
2331	list = list->bm_mfc_next;
2332	unschedule_bw_meter(x);
2333	free(x, M_BWMETER);
2334    }
2335}
2336
2337/*
2338 * Delete one or multiple bw_meter entries
2339 */
2340static int
2341del_bw_upcall(struct bw_upcall *req)
2342{
2343    struct mfc *mfc;
2344    struct bw_meter *x;
2345
2346    if (!(mrt_api_config & MRT_MFC_BW_UPCALL))
2347	return EOPNOTSUPP;
2348
2349    MFC_LOCK();
2350    /* Find the corresponding MFC entry */
2351    mfc = mfc_find(req->bu_src.s_addr, req->bu_dst.s_addr);
2352    if (mfc == NULL) {
2353	MFC_UNLOCK();
2354	return EADDRNOTAVAIL;
2355    } else if (req->bu_flags & BW_UPCALL_DELETE_ALL) {
2356	/*
2357	 * Delete all bw_meter entries for this mfc
2358	 */
2359	struct bw_meter *list;
2360
2361	list = mfc->mfc_bw_meter;
2362	mfc->mfc_bw_meter = NULL;
2363	free_bw_list(list);
2364	MFC_UNLOCK();
2365	return 0;
2366    } else {			/* Delete a single bw_meter entry */
2367	struct bw_meter *prev;
2368	uint32_t flags = 0;
2369
2370	flags = compute_bw_meter_flags(req);
2371
2372	/* Find the bw_meter entry to delete */
2373	for (prev = NULL, x = mfc->mfc_bw_meter; x != NULL;
2374	     prev = x, x = x->bm_mfc_next) {
2375	    if ((BW_TIMEVALCMP(&x->bm_threshold.b_time,
2376			       &req->bu_threshold.b_time, ==)) &&
2377		(x->bm_threshold.b_packets == req->bu_threshold.b_packets) &&
2378		(x->bm_threshold.b_bytes == req->bu_threshold.b_bytes) &&
2379		(x->bm_flags & BW_METER_USER_FLAGS) == flags)
2380		break;
2381	}
2382	if (x != NULL) { /* Delete entry from the list for this MFC */
2383	    if (prev != NULL)
2384		prev->bm_mfc_next = x->bm_mfc_next;	/* remove from middle*/
2385	    else
2386		x->bm_mfc->mfc_bw_meter = x->bm_mfc_next;/* new head of list */
2387
2388	    unschedule_bw_meter(x);
2389	    MFC_UNLOCK();
2390	    /* Free the bw_meter entry */
2391	    free(x, M_BWMETER);
2392	    return 0;
2393	} else {
2394	    MFC_UNLOCK();
2395	    return EINVAL;
2396	}
2397    }
2398    /* NOTREACHED */
2399}
2400
2401/*
2402 * Perform bandwidth measurement processing that may result in an upcall
2403 */
2404static void
2405bw_meter_receive_packet(struct bw_meter *x, int plen, struct timeval *nowp)
2406{
2407    struct timeval delta;
2408
2409    MFC_LOCK_ASSERT();
2410
2411    delta = *nowp;
2412    BW_TIMEVALDECR(&delta, &x->bm_start_time);
2413
2414    if (x->bm_flags & BW_METER_GEQ) {
2415	/*
2416	 * Processing for ">=" type of bw_meter entry
2417	 */
2418	if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
2419	    /* Reset the bw_meter entry */
2420	    x->bm_start_time = *nowp;
2421	    x->bm_measured.b_packets = 0;
2422	    x->bm_measured.b_bytes = 0;
2423	    x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
2424	}
2425
2426	/* Record that a packet is received */
2427	x->bm_measured.b_packets++;
2428	x->bm_measured.b_bytes += plen;
2429
2430	/*
2431	 * Test if we should deliver an upcall
2432	 */
2433	if (!(x->bm_flags & BW_METER_UPCALL_DELIVERED)) {
2434	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
2435		 (x->bm_measured.b_packets >= x->bm_threshold.b_packets)) ||
2436		((x->bm_flags & BW_METER_UNIT_BYTES) &&
2437		 (x->bm_measured.b_bytes >= x->bm_threshold.b_bytes))) {
2438		/* Prepare an upcall for delivery */
2439		bw_meter_prepare_upcall(x, nowp);
2440		x->bm_flags |= BW_METER_UPCALL_DELIVERED;
2441	    }
2442	}
2443    } else if (x->bm_flags & BW_METER_LEQ) {
2444	/*
2445	 * Processing for "<=" type of bw_meter entry
2446	 */
2447	if (BW_TIMEVALCMP(&delta, &x->bm_threshold.b_time, >)) {
2448	    /*
2449	     * The periodic scan of the multicast forwarding table for "<="
2450	     * type bw_meter entries is running behind, so test right now
2451	     * whether we should deliver an upcall.
2452	     */
2453	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
2454		 (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
2455		((x->bm_flags & BW_METER_UNIT_BYTES) &&
2456		 (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
2457		/* Prepare an upcall for delivery */
2458		bw_meter_prepare_upcall(x, nowp);
2459	    }
2460	    /* Reschedule the bw_meter entry */
2461	    unschedule_bw_meter(x);
2462	    schedule_bw_meter(x, nowp);
2463	}
2464
2465	/* Record that a packet is received */
2466	x->bm_measured.b_packets++;
2467	x->bm_measured.b_bytes += plen;
2468
2469	/*
2470	 * Test if we should restart the measuring interval
2471	 */
2472	if ((x->bm_flags & BW_METER_UNIT_PACKETS &&
2473	     x->bm_measured.b_packets <= x->bm_threshold.b_packets) ||
2474	    (x->bm_flags & BW_METER_UNIT_BYTES &&
2475	     x->bm_measured.b_bytes <= x->bm_threshold.b_bytes)) {
2476	    /* Don't restart the measuring interval */
2477	} else {
2478	    /* Do restart the measuring interval */
2479	    /*
2480	     * XXX: note that we don't unschedule and schedule, because this
2481	     * might be too much overhead per packet. Instead, when we process
2482	     * all entries for a given timer hash bin, we check whether it is
2483	     * really a timeout. If not, we reschedule at that time.
2484	     */
2485	    x->bm_start_time = *nowp;
2486	    x->bm_measured.b_packets = 0;
2487	    x->bm_measured.b_bytes = 0;
2488	    x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
2489	}
2490    }
2491}
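
/*
 * Concrete reading of the two branches above (hypothetical threshold of
 * 100 packets per 3-second interval, for illustration): a ">=" meter
 * delivers its upcall as soon as the 100th packet of the current interval
 * arrives, then stays silent until the interval is restarted; a "<="
 * meter is evaluated when its interval expires (here, or from
 * bw_meter_process()) and delivers an upcall only if the measured count
 * stayed at or below 100 for that interval.
 */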
2492
2493/*
2494 * Prepare a bandwidth-related upcall
2495 */
2496static void
2497bw_meter_prepare_upcall(struct bw_meter *x, struct timeval *nowp)
2498{
2499    struct timeval delta;
2500    struct bw_upcall *u;
2501
2502    MFC_LOCK_ASSERT();
2503
2504    /*
2505     * Compute the measured time interval
2506     */
2507    delta = *nowp;
2508    BW_TIMEVALDECR(&delta, &x->bm_start_time);
2509
2510    /*
2511     * If there are too many pending upcalls, deliver them now
2512     */
2513    if (bw_upcalls_n >= BW_UPCALLS_MAX)
2514	bw_upcalls_send();
2515
2516    /*
2517     * Set the bw_upcall entry
2518     */
2519    u = &bw_upcalls[bw_upcalls_n++];
2520    u->bu_src = x->bm_mfc->mfc_origin;
2521    u->bu_dst = x->bm_mfc->mfc_mcastgrp;
2522    u->bu_threshold.b_time = x->bm_threshold.b_time;
2523    u->bu_threshold.b_packets = x->bm_threshold.b_packets;
2524    u->bu_threshold.b_bytes = x->bm_threshold.b_bytes;
2525    u->bu_measured.b_time = delta;
2526    u->bu_measured.b_packets = x->bm_measured.b_packets;
2527    u->bu_measured.b_bytes = x->bm_measured.b_bytes;
2528    u->bu_flags = 0;
2529    if (x->bm_flags & BW_METER_UNIT_PACKETS)
2530	u->bu_flags |= BW_UPCALL_UNIT_PACKETS;
2531    if (x->bm_flags & BW_METER_UNIT_BYTES)
2532	u->bu_flags |= BW_UPCALL_UNIT_BYTES;
2533    if (x->bm_flags & BW_METER_GEQ)
2534	u->bu_flags |= BW_UPCALL_GEQ;
2535    if (x->bm_flags & BW_METER_LEQ)
2536	u->bu_flags |= BW_UPCALL_LEQ;
2537}
2538
2539/*
2540 * Send the pending bandwidth-related upcalls
2541 */
2542static void
2543bw_upcalls_send(void)
2544{
2545    struct mbuf *m;
2546    int len = bw_upcalls_n * sizeof(bw_upcalls[0]);
2547    struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
2548    static struct igmpmsg igmpmsg = { 0,		/* unused1 */
2549				      0,		/* unused2 */
2550				      IGMPMSG_BW_UPCALL,/* im_msgtype */
2551				      0,		/* im_mbz  */
2552				      0,		/* im_vif  */
2553				      0,		/* unused3 */
2554				      { 0 },		/* im_src  */
2555				      { 0 } };		/* im_dst  */
2556
2557    MFC_LOCK_ASSERT();
2558
2559    if (bw_upcalls_n == 0)
2560	return;			/* No pending upcalls */
2561
2562    bw_upcalls_n = 0;
2563
2564    /*
2565     * Allocate a new mbuf, initialize it with the header and
2566     * the payload for the pending calls.
2567     */
2568    MGETHDR(m, M_DONTWAIT, MT_DATA);
2569    if (m == NULL) {
2570	log(LOG_WARNING, "bw_upcalls_send: cannot allocate mbuf\n");
2571	return;
2572    }
2573
2574    m->m_len = m->m_pkthdr.len = 0;
2575    m_copyback(m, 0, sizeof(struct igmpmsg), (caddr_t)&igmpmsg);
2576    m_copyback(m, sizeof(struct igmpmsg), len, (caddr_t)&bw_upcalls[0]);
2577
2578    /*
2579     * Send the upcalls
2580     * XXX do we need to set the address in k_igmpsrc ?
2581     */
2582    mrtstat.mrts_upcalls++;
2583    if (socket_send(ip_mrouter, m, &k_igmpsrc) < 0) {
2584	log(LOG_WARNING, "bw_upcalls_send: ip_mrouter socket queue full\n");
2585	++mrtstat.mrts_upq_sockfull;
2586    }
2587}
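
/*
 * What the routing daemon reads from the socket for the message built
 * above: a struct igmpmsg whose im_msgtype is IGMPMSG_BW_UPCALL,
 * immediately followed by an array of struct bw_upcall entries -- one per
 * meter that fired -- each carrying the configured threshold and the
 * measured b_time/b_packets/b_bytes for its interval.
 */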
2588
2589/*
2590 * Compute the timeout hash value for the bw_meter entries
2591 */
2592#define	BW_METER_TIMEHASH(bw_meter, hash)				\
2593    do {								\
2594	struct timeval next_timeval = (bw_meter)->bm_start_time;	\
2595									\
2596	BW_TIMEVALADD(&next_timeval, &(bw_meter)->bm_threshold.b_time); \
2597	(hash) = next_timeval.tv_sec;					\
2598	if (next_timeval.tv_usec)					\
2599	    (hash)++; /* XXX: make sure we don't timeout early */	\
2600	(hash) %= BW_METER_BUCKETS;					\
2601    } while (0)
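
/*
 * Worked example (hypothetical values, for illustration): with
 * bm_start_time = { 1000 sec, 200000 usec } and bm_threshold.b_time =
 * { 3 sec, 0 usec }, next_timeval is 1003.2 sec; the non-zero usec part
 * rounds the hash up to 1004, so the entry lands in bucket
 * 1004 % BW_METER_BUCKETS and is not examined before its interval has
 * fully elapsed.
 */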
2602
2603/*
2604 * Schedule a timer to periodically process a bw_meter entry of type "<="
2605 * by linking the entry into the proper hash bucket.
2606 */
2607static void
2608schedule_bw_meter(struct bw_meter *x, struct timeval *nowp)
2609{
2610    int time_hash;
2611
2612    MFC_LOCK_ASSERT();
2613
2614    if (!(x->bm_flags & BW_METER_LEQ))
2615	return;		/* XXX: we schedule timers only for "<=" entries */
2616
2617    /*
2618     * Reset the bw_meter entry
2619     */
2620    x->bm_start_time = *nowp;
2621    x->bm_measured.b_packets = 0;
2622    x->bm_measured.b_bytes = 0;
2623    x->bm_flags &= ~BW_METER_UPCALL_DELIVERED;
2624
2625    /*
2626     * Compute the timeout hash value and insert the entry
2627     */
2628    BW_METER_TIMEHASH(x, time_hash);
2629    x->bm_time_next = bw_meter_timers[time_hash];
2630    bw_meter_timers[time_hash] = x;
2631    x->bm_time_hash = time_hash;
2632}
2633
2634/*
2635 * Unschedule the periodic timer that processes bw_meter entry of type "<="
2636 * by removing the entry from the proper hash bucket.
2637 */
2638static void
2639unschedule_bw_meter(struct bw_meter *x)
2640{
2641    int time_hash;
2642    struct bw_meter *prev, *tmp;
2643
2644    MFC_LOCK_ASSERT();
2645
2646    if (!(x->bm_flags & BW_METER_LEQ))
2647	return;		/* XXX: we schedule timers only for "<=" entries */
2648
2649    /*
2650     * Compute the timeout hash value and delete the entry
2651     */
2652    time_hash = x->bm_time_hash;
2653    if (time_hash >= BW_METER_BUCKETS)
2654	return;		/* Entry was not scheduled */
2655
2656    for (prev = NULL, tmp = bw_meter_timers[time_hash];
2657	     tmp != NULL; prev = tmp, tmp = tmp->bm_time_next)
2658	if (tmp == x)
2659	    break;
2660
2661    if (tmp == NULL)
2662	panic("unschedule_bw_meter: bw_meter entry not found");
2663
2664    if (prev != NULL)
2665	prev->bm_time_next = x->bm_time_next;
2666    else
2667	bw_meter_timers[time_hash] = x->bm_time_next;
2668
2669    x->bm_time_next = NULL;
2670    x->bm_time_hash = BW_METER_BUCKETS;
2671}
2672
2673
2674/*
2675 * Process all "<=" type bw_meter entries that are due now, and for
2676 * each entry prepare an upcall if necessary.  Each processed entry is
2677 * then rescheduled for the next round of (periodic) processing.
2678 *
2679 * This is run periodically (once per second normally). On each round,
2680 * all the potentially matching entries are in the hash slot that we are
2681 * looking at.
2682 */
2683static void
2684bw_meter_process(void)
2685{
2686    static uint32_t last_tv_sec;	/* last time we processed this */
2687
2688    uint32_t loops;
2689    int i;
2690    struct timeval now, process_endtime;
2691
2692    GET_TIME(now);
2693    if (last_tv_sec == now.tv_sec)
2694	return;		/* nothing to do */
2695
2696    loops = now.tv_sec - last_tv_sec;
2697    last_tv_sec = now.tv_sec;
2698    if (loops > BW_METER_BUCKETS)
2699	loops = BW_METER_BUCKETS;
2700
2701    MFC_LOCK();
2702    /*
2703     * Process all bins of bw_meter entries from the one after the last
2704     * processed to the current one. On entry, i points to the last bucket
2705     * visited, so we need to increment i at the beginning of the loop.
2706     */
2707    for (i = (now.tv_sec - loops) % BW_METER_BUCKETS; loops > 0; loops--) {
2708	struct bw_meter *x, *tmp_list;
2709
2710	if (++i >= BW_METER_BUCKETS)
2711	    i = 0;
2712
2713	/* Disconnect the list of bw_meter entries from the bin */
2714	tmp_list = bw_meter_timers[i];
2715	bw_meter_timers[i] = NULL;
2716
2717	/* Process the list of bw_meter entries */
2718	while (tmp_list != NULL) {
2719	    x = tmp_list;
2720	    tmp_list = tmp_list->bm_time_next;
2721
2722	    /* Test if the time interval is over */
2723	    process_endtime = x->bm_start_time;
2724	    BW_TIMEVALADD(&process_endtime, &x->bm_threshold.b_time);
2725	    if (BW_TIMEVALCMP(&process_endtime, &now, >)) {
2726		/* Not yet: reschedule, but don't reset */
2727		int time_hash;
2728
2729		BW_METER_TIMEHASH(x, time_hash);
2730		if (time_hash == i && process_endtime.tv_sec == now.tv_sec) {
2731		    /*
2732		     * XXX: somehow the bin processing is a bit ahead of time.
2733		     * Put the entry in the next bin.
2734		     */
2735		    if (++time_hash >= BW_METER_BUCKETS)
2736			time_hash = 0;
2737		}
2738		x->bm_time_next = bw_meter_timers[time_hash];
2739		bw_meter_timers[time_hash] = x;
2740		x->bm_time_hash = time_hash;
2741
2742		continue;
2743	    }
2744
2745	    /*
2746	     * Test if we should deliver an upcall
2747	     */
2748	    if (((x->bm_flags & BW_METER_UNIT_PACKETS) &&
2749		 (x->bm_measured.b_packets <= x->bm_threshold.b_packets)) ||
2750		((x->bm_flags & BW_METER_UNIT_BYTES) &&
2751		 (x->bm_measured.b_bytes <= x->bm_threshold.b_bytes))) {
2752		/* Prepare an upcall for delivery */
2753		bw_meter_prepare_upcall(x, &now);
2754	    }
2755
2756	    /*
2757	     * Reschedule for next processing
2758	     */
2759	    schedule_bw_meter(x, &now);
2760	}
2761    }
2762
2763    /* Send all upcalls that are pending delivery */
2764    bw_upcalls_send();
2765
2766    MFC_UNLOCK();
2767}
2768
2769/*
2770 * A periodic function for sending all upcalls that are pending delivery
2771 */
2772static void
2773expire_bw_upcalls_send(void *unused)
2774{
2775    MFC_LOCK();
2776    bw_upcalls_send();
2777    MFC_UNLOCK();
2778
2779    callout_reset(&bw_upcalls_ch, BW_UPCALLS_PERIOD,
2780	expire_bw_upcalls_send, NULL);
2781}
2782
2783/*
2784 * A periodic function for periodic scanning of the multicast forwarding
2785 * table for processing all "<=" bw_meter entries.
2786 */
2787static void
2788expire_bw_meter_process(void *unused)
2789{
2790    if (mrt_api_config & MRT_MFC_BW_UPCALL)
2791	bw_meter_process();
2792
2793    callout_reset(&bw_meter_ch, BW_METER_PERIOD, expire_bw_meter_process, NULL);
2794}
2795
2796/*
2797 * End of bandwidth monitoring code
2798 */
2799
2800#ifdef PIM
2801/*
2802 * Send the packet up to the user-level daemon or, if an RP address is
2803 * configured for this entry, do the Register encapsulation in the kernel.
2804 */
2805static int
2806pim_register_send(struct ip *ip, struct vif *vifp,
2807	struct mbuf *m, struct mfc *rt)
2808{
2809    struct mbuf *mb_copy, *mm;
2810
2811    if (mrtdebug & DEBUG_PIM)
2812	log(LOG_DEBUG, "pim_register_send: ");
2813
2814    mb_copy = pim_register_prepare(ip, m);
2815    if (mb_copy == NULL)
2816	return ENOBUFS;
2817
2818    /*
2819     * Send all the fragments. Note that the mbuf for each fragment
2820     * is freed by the sending machinery.
2821     */
2822    for (mm = mb_copy; mm; mm = mb_copy) {
2823	mb_copy = mm->m_nextpkt;
2824	mm->m_nextpkt = 0;
2825	mm = m_pullup(mm, sizeof(struct ip));
2826	if (mm != NULL) {
2827	    ip = mtod(mm, struct ip *);
2828	    if ((mrt_api_config & MRT_MFC_RP) &&
2829		(rt->mfc_rp.s_addr != INADDR_ANY)) {
2830		pim_register_send_rp(ip, vifp, mm, rt);
2831	    } else {
2832		pim_register_send_upcall(ip, vifp, mm, rt);
2833	    }
2834	}
2835    }
2836
2837    return 0;
2838}
2839
2840/*
2841 * Return a copy of the data packet that is ready for PIM Register
2842 * encapsulation.
2843 * XXX: Note that in the returned copy the IP header is valid (network byte order, checksum filled in).
2844 */
2845static struct mbuf *
2846pim_register_prepare(struct ip *ip, struct mbuf *m)
2847{
2848    struct mbuf *mb_copy = NULL;
2849    int mtu;
2850
2851    /* Take care of delayed checksums */
2852    if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
2853	in_delayed_cksum(m);
2854	m->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
2855    }
2856
2857    /*
2858     * Copy the old packet & pullup its IP header into the
2859     * new mbuf so we can modify it.
2860     */
2861    mb_copy = m_copypacket(m, M_DONTWAIT);
2862    if (mb_copy == NULL)
2863	return NULL;
2864    mb_copy = m_pullup(mb_copy, ip->ip_hl << 2);
2865    if (mb_copy == NULL)
2866	return NULL;
2867
2868    /* take care of the TTL */
2869    ip = mtod(mb_copy, struct ip *);
2870    --ip->ip_ttl;
2871
2872    /* Compute the MTU after the PIM Register encapsulation */
2873    mtu = 0xffff - sizeof(pim_encap_iphdr) - sizeof(pim_encap_pimhdr);
2874
2875    if (ip->ip_len <= mtu) {
2876	/* Turn the IP header into a valid one */
2877	ip->ip_len = htons(ip->ip_len);
2878	ip->ip_off = htons(ip->ip_off);
2879	ip->ip_sum = 0;
2880	ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2);
2881    } else {
2882	/* Fragment the packet */
2883	if (ip_fragment(ip, &mb_copy, mtu, 0, CSUM_DELAY_IP) != 0) {
2884	    m_freem(mb_copy);
2885	    return NULL;
2886	}
2887    }
2888    return mb_copy;
2889}
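
/*
 * Size arithmetic for the MTU test above (assuming the usual 20-byte
 * encapsulating IP header and 8-byte PIM Register header): the inner
 * packet may be at most 0xffff - 20 - 8 = 65507 bytes so that the
 * resulting Register datagram still fits the 16-bit IP total-length
 * field; anything larger is fragmented first, and the caller encapsulates
 * each fragment separately.
 */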
2890
2891/*
2892 * Send an upcall with the data packet to the user-level process.
2893 */
2894static int
2895pim_register_send_upcall(struct ip *ip, struct vif *vifp,
2896	struct mbuf *mb_copy, struct mfc *rt)
2897{
2898    struct mbuf *mb_first;
2899    int len = ntohs(ip->ip_len);
2900    struct igmpmsg *im;
2901    struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET };
2902
2903    VIF_LOCK_ASSERT();
2904
2905    /*
2906     * Add a new mbuf with an upcall header
2907     */
2908    MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
2909    if (mb_first == NULL) {
2910	m_freem(mb_copy);
2911	return ENOBUFS;
2912    }
2913    mb_first->m_data += max_linkhdr;
2914    mb_first->m_pkthdr.len = len + sizeof(struct igmpmsg);
2915    mb_first->m_len = sizeof(struct igmpmsg);
2916    mb_first->m_next = mb_copy;
2917
2918    /* Send message to routing daemon */
2919    im = mtod(mb_first, struct igmpmsg *);
2920    im->im_msgtype	= IGMPMSG_WHOLEPKT;
2921    im->im_mbz		= 0;
2922    im->im_vif		= vifp - viftable;
2923    im->im_src		= ip->ip_src;
2924    im->im_dst		= ip->ip_dst;
2925
2926    k_igmpsrc.sin_addr	= ip->ip_src;
2927
2928    mrtstat.mrts_upcalls++;
2929
2930    if (socket_send(ip_mrouter, mb_first, &k_igmpsrc) < 0) {
2931	if (mrtdebug & DEBUG_PIM)
2932	    log(LOG_WARNING,
2933		"mcast: pim_register_send_upcall: ip_mrouter socket queue full");
2934	++mrtstat.mrts_upq_sockfull;
2935	return ENOBUFS;
2936    }
2937
2938    /* Keep statistics */
2939    pimstat.pims_snd_registers_msgs++;
2940    pimstat.pims_snd_registers_bytes += len;
2941
2942    return 0;
2943}
2944
2945/*
2946 * Encapsulate the data packet in PIM Register message and send it to the RP.
2947 */
2948static int
2949pim_register_send_rp(struct ip *ip, struct vif *vifp,
2950	struct mbuf *mb_copy, struct mfc *rt)
2951{
2952    struct mbuf *mb_first;
2953    struct ip *ip_outer;
2954    struct pim_encap_pimhdr *pimhdr;
2955    int len = ntohs(ip->ip_len);
2956    vifi_t vifi = rt->mfc_parent;
2957
2958    VIF_LOCK_ASSERT();
2959
2960    if ((vifi >= numvifs) || (viftable[vifi].v_lcl_addr.s_addr == 0)) {
2961	m_freem(mb_copy);
2962	return EADDRNOTAVAIL;		/* The iif vif is invalid */
2963    }
2964
2965    /*
2966     * Add a new mbuf with the encapsulating header
2967     */
2968    MGETHDR(mb_first, M_DONTWAIT, MT_DATA);
2969    if (mb_first == NULL) {
2970	m_freem(mb_copy);
2971	return ENOBUFS;
2972    }
2973    mb_first->m_data += max_linkhdr;
2974    mb_first->m_len = sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
2975    mb_first->m_next = mb_copy;
2976
2977    mb_first->m_pkthdr.len = len + mb_first->m_len;
2978
2979    /*
2980     * Fill in the encapsulating IP and PIM header
2981     */
2982    ip_outer = mtod(mb_first, struct ip *);
2983    *ip_outer = pim_encap_iphdr;
2984    ip_outer->ip_id = ip_newid();
2985    ip_outer->ip_len = len + sizeof(pim_encap_iphdr) + sizeof(pim_encap_pimhdr);
2986    ip_outer->ip_src = viftable[vifi].v_lcl_addr;
2987    ip_outer->ip_dst = rt->mfc_rp;
2988    /*
2989     * Copy the inner header TOS to the outer header, and take care of the
2990     * IP_DF bit.
2991     */
2992    ip_outer->ip_tos = ip->ip_tos;
2993    if (ntohs(ip->ip_off) & IP_DF)
2994	ip_outer->ip_off |= IP_DF;
2995    pimhdr = (struct pim_encap_pimhdr *)((caddr_t)ip_outer
2996					 + sizeof(pim_encap_iphdr));
2997    *pimhdr = pim_encap_pimhdr;
2998    /* If the iif crosses a border, set the Border-bit */
2999    if (rt->mfc_flags[vifi] & MRT_MFC_FLAGS_BORDER_VIF & mrt_api_config)
3000	pimhdr->flags |= htonl(PIM_BORDER_REGISTER);
3001
3002    mb_first->m_data += sizeof(pim_encap_iphdr);
3003    pimhdr->pim.pim_cksum = in_cksum(mb_first, sizeof(pim_encap_pimhdr));
3004    mb_first->m_data -= sizeof(pim_encap_iphdr);
3005
3006    if (vifp->v_rate_limit == 0)
3007	tbf_send_packet(vifp, mb_first);
3008    else
3009	tbf_control(vifp, mb_first, ip, ip_outer->ip_len);
3010
3011    /* Keep statistics */
3012    pimstat.pims_snd_registers_msgs++;
3013    pimstat.pims_snd_registers_bytes += len;
3014
3015    return 0;
3016}
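
/*
 * Resulting layout of a Register sent to the RP (a sketch; header sizes
 * assume the common 20-byte IP and 8-byte PIM Register headers):
 *
 *	+-----------------+------------------+----------------------+
 *	| outer IP header | PIM Register hdr | original IP packet   |
 *	| src = vif addr  | flags, checksum  | (TTL already         |
 *	| dst = RP addr   |                  |  decremented)        |
 *	+-----------------+------------------+----------------------+
 *
 * The PIM checksum covers only the Register header, which is why
 * in_cksum() above is computed with m_data temporarily advanced past the
 * outer IP header.
 */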
3017
3018/*
3019 * PIM-SMv2 and PIM-DM messages processing.
3020 * Receives and verifies the PIM control messages, and passes them
3021 * up to the listening socket, using rip_input().
3022 * The only message with special processing is the PIM_REGISTER message
3023 * (used by PIM-SM): the PIM header is stripped off, and the inner packet
3024 * is passed to if_simloop().
3025 */
3026void
3027pim_input(struct mbuf *m, int off)
3028{
3029    struct ip *ip = mtod(m, struct ip *);
3030    struct pim *pim;
3031    int minlen;
3032    int datalen = ip->ip_len;
3033    int ip_tos;
3034    int iphlen = off;
3035
3036    /* Keep statistics */
3037    pimstat.pims_rcv_total_msgs++;
3038    pimstat.pims_rcv_total_bytes += datalen;
3039
3040    /*
3041     * Validate lengths
3042     */
3043    if (datalen < PIM_MINLEN) {
3044	pimstat.pims_rcv_tooshort++;
3045	log(LOG_ERR, "pim_input: packet size too small %d from %lx\n",
3046	    datalen, (u_long)ip->ip_src.s_addr);
3047	m_freem(m);
3048	return;
3049    }
3050
3051    /*
3052     * If the packet is at least as big as a REGISTER, go ahead
3053     * and grab the PIM REGISTER header size, to avoid another
3054     * possible m_pullup() later.
3055     *
3056     * PIM_MINLEN       == pimhdr + u_int32_t == 4 + 4 = 8
3057     * PIM_REG_MINLEN   == pimhdr + reghdr + encap_iphdr == 4 + 4 + 20 = 28
3058     */
3059    minlen = iphlen + (datalen >= PIM_REG_MINLEN ? PIM_REG_MINLEN : PIM_MINLEN);
3060    /*
3061     * Get the IP and PIM headers in contiguous memory, and
3062     * possibly the PIM REGISTER header.
3063     */
3064    if ((m->m_flags & M_EXT || m->m_len < minlen) &&
3065	(m = m_pullup(m, minlen)) == 0) {
3066	log(LOG_ERR, "pim_input: m_pullup failure\n");
3067	return;
3068    }
3069    /* m_pullup() may have given us a new mbuf so reset ip. */
3070    ip = mtod(m, struct ip *);
3071    ip_tos = ip->ip_tos;
3072
3073    /* adjust mbuf to point to the PIM header */
3074    m->m_data += iphlen;
3075    m->m_len  -= iphlen;
3076    pim = mtod(m, struct pim *);
3077
3078    /*
3079     * Validate checksum. If PIM REGISTER, exclude the data packet.
3080     *
3081     * XXX: some older PIMv2 implementations don't make this distinction,
3082     * so for compatibility reasons perform the checksum over part of the
3083     * message first, and if that fails, over the whole message.
3084     */
3085    if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER && in_cksum(m, PIM_MINLEN) == 0) {
3086	/* do nothing, checksum okay */
3087    } else if (in_cksum(m, datalen)) {
3088	pimstat.pims_rcv_badsum++;
3089	if (mrtdebug & DEBUG_PIM)
3090	    log(LOG_DEBUG, "pim_input: invalid checksum");
3091	m_freem(m);
3092	return;
3093    }
3094
3095    /* PIM version check */
3096    if (PIM_VT_V(pim->pim_vt) < PIM_VERSION) {
3097	pimstat.pims_rcv_badversion++;
3098	log(LOG_ERR, "pim_input: incorrect version %d, expecting %d\n",
3099	    PIM_VT_V(pim->pim_vt), PIM_VERSION);
3100	m_freem(m);
3101	return;
3102    }
3103
3104    /* restore mbuf back to the outer IP */
3105    m->m_data -= iphlen;
3106    m->m_len  += iphlen;
3107
3108    if (PIM_VT_T(pim->pim_vt) == PIM_REGISTER) {
3109	/*
3110	 * Since this is a REGISTER, we'll make a copy of the register
3111	 * headers ip + pim + u_int32 + encap_ip, to be passed up to the
3112	 * routing daemon.
3113	 */
3114	struct sockaddr_in dst = { sizeof(dst), AF_INET };
3115	struct mbuf *mcp;
3116	struct ip *encap_ip;
3117	u_int32_t *reghdr;
3118	struct ifnet *vifp;
3119
3120	VIF_LOCK();
3121	if ((reg_vif_num >= numvifs) || (reg_vif_num == VIFI_INVALID)) {
3122	    VIF_UNLOCK();
3123	    if (mrtdebug & DEBUG_PIM)
3124		log(LOG_DEBUG,
3125		    "pim_input: register vif not set: %d\n", reg_vif_num);
3126	    m_freem(m);
3127	    return;
3128	}
3129	/* XXX need refcnt? */
3130	vifp = viftable[reg_vif_num].v_ifp;
3131	VIF_UNLOCK();
3132
3133	/*
3134	 * Validate length
3135	 */
3136	if (datalen < PIM_REG_MINLEN) {
3137	    pimstat.pims_rcv_tooshort++;
3138	    pimstat.pims_rcv_badregisters++;
3139	    log(LOG_ERR,
3140		"pim_input: register packet size too small %d from %lx\n",
3141		datalen, (u_long)ip->ip_src.s_addr);
3142	    m_freem(m);
3143	    return;
3144	}
3145
3146	reghdr = (u_int32_t *)(pim + 1);
3147	encap_ip = (struct ip *)(reghdr + 1);
3148
3149	if (mrtdebug & DEBUG_PIM) {
3150	    log(LOG_DEBUG,
3151		"pim_input[register], encap_ip: %lx -> %lx, encap_ip len %d\n",
3152		(u_long)ntohl(encap_ip->ip_src.s_addr),
3153		(u_long)ntohl(encap_ip->ip_dst.s_addr),
3154		ntohs(encap_ip->ip_len));
3155	}
3156
3157	/* verify the version number of the inner packet */
3158	if (encap_ip->ip_v != IPVERSION) {
3159	    pimstat.pims_rcv_badregisters++;
3160	    if (mrtdebug & DEBUG_PIM) {
3161		log(LOG_DEBUG, "pim_input: invalid IP version (%d) "
3162		    "of the inner packet\n", encap_ip->ip_v);
3163	    }
3164	    m_freem(m);
3165	    return;
3166	}
3167
3168	/* verify the inner packet is destined to a mcast group */
3169	if (!IN_MULTICAST(ntohl(encap_ip->ip_dst.s_addr))) {
3170	    pimstat.pims_rcv_badregisters++;
3171	    if (mrtdebug & DEBUG_PIM)
3172		log(LOG_DEBUG,
3173		    "pim_input: inner packet of register is not "
3174		    "multicast %lx\n",
3175		    (u_long)ntohl(encap_ip->ip_dst.s_addr));
3176	    m_freem(m);
3177	    return;
3178	}
3179
3180	/* If a NULL_REGISTER, pass it to the daemon */
3181	if ((ntohl(*reghdr) & PIM_NULL_REGISTER))
3182	    goto pim_input_to_daemon;
3183
3184	/*
3185	 * Copy the TOS from the outer IP header to the inner IP header.
3186	 */
3187	if (encap_ip->ip_tos != ip_tos) {
3188	    /* Outer TOS -> inner TOS */
3189	    encap_ip->ip_tos = ip_tos;
3190	    /* Recompute the inner header checksum. Sigh... */
3191
3192	    /* adjust mbuf to point to the inner IP header */
3193	    m->m_data += (iphlen + PIM_MINLEN);
3194	    m->m_len  -= (iphlen + PIM_MINLEN);
3195
3196	    encap_ip->ip_sum = 0;
3197	    encap_ip->ip_sum = in_cksum(m, encap_ip->ip_hl << 2);
3198
3199	    /* restore mbuf to point back to the outer IP header */
3200	    m->m_data -= (iphlen + PIM_MINLEN);
3201	    m->m_len  += (iphlen + PIM_MINLEN);
3202	}
3203
3204	/*
3205	 * Decapsulate the inner IP packet and loopback to forward it
3206	 * as a normal multicast packet. Also, make a copy of the
3207	 *     outer_iphdr + pimhdr + reghdr + encap_iphdr
3208	 * to pass to the daemon later, so it can take the appropriate
3209	 * actions (e.g., send back PIM_REGISTER_STOP).
3210	 * XXX: here m->m_data points to the outer IP header.
3211	 */
3212	mcp = m_copy(m, 0, iphlen + PIM_REG_MINLEN);
3213	if (mcp == NULL) {
3214	    log(LOG_ERR,
3215		"pim_input: pim register: could not copy register head\n");
3216	    m_freem(m);
3217	    return;
3218	}
3219
3220	/* Keep statistics */
3221	/* XXX: registers_bytes include only the encap. mcast pkt */
3222	pimstat.pims_rcv_registers_msgs++;
3223	pimstat.pims_rcv_registers_bytes += ntohs(encap_ip->ip_len);
3224
3225	/*
3226	 * forward the inner ip packet; point m_data at the inner ip.
3227	 */
3228	m_adj(m, iphlen + PIM_MINLEN);
3229
3230	if (mrtdebug & DEBUG_PIM) {
3231	    log(LOG_DEBUG,
3232		"pim_input: forwarding decapsulated register: "
3233		"src %lx, dst %lx, vif %d\n",
3234		(u_long)ntohl(encap_ip->ip_src.s_addr),
3235		(u_long)ntohl(encap_ip->ip_dst.s_addr),
3236		reg_vif_num);
3237	}
3238	/* NB: vifp was collected above; can it change on us? */
3239	if_simloop(vifp, m, dst.sin_family, 0);
3240
3241	/* prepare the register head to send to the mrouting daemon */
3242	m = mcp;
3243    }
3244
3245pim_input_to_daemon:
3246    /*
3247     * Pass the PIM message up to the daemon; if it is a Register message,
3248     * pass the 'head' only up to the daemon. This includes the
3249     * outer IP header, PIM header, PIM-Register header and the
3250     * inner IP header.
3251     * XXX: the outer IP header pkt size of a Register is not adjusted to
3252     * reflect the fact that the inner multicast data is truncated.
3253     */
3254    rip_input(m, iphlen);
3255
3256    return;
3257}
3258#endif /* PIM */
3259
3260static int
3261ip_mroute_modevent(module_t mod, int type, void *unused)
3262{
3263    switch (type) {
3264    case MOD_LOAD:
3265	mtx_init(&mrouter_mtx, "mrouter initialization", NULL, MTX_DEF);
3266	MFC_LOCK_INIT();
3267	VIF_LOCK_INIT();
3268	ip_mrouter_reset();
3269	ip_mcast_src = X_ip_mcast_src;
3270	ip_mforward = X_ip_mforward;
3271	ip_mrouter_done = X_ip_mrouter_done;
3272	ip_mrouter_get = X_ip_mrouter_get;
3273	ip_mrouter_set = X_ip_mrouter_set;
3274	ip_rsvp_force_done = X_ip_rsvp_force_done;
3275	ip_rsvp_vif = X_ip_rsvp_vif;
3276	legal_vif_num = X_legal_vif_num;
3277	mrt_ioctl = X_mrt_ioctl;
3278	rsvp_input_p = X_rsvp_input;
3279	break;
3280
3281    case MOD_UNLOAD:
3282	/*
3283	 * Typically module unload happens after the user-level
3284	 * process has shut down the kernel services (the check
3285	 * below ensures someone can't just yank the module out
3286	 * from under a running process).  But if the module is
3287	 * just loaded and then unloaded without starting up a user
3288	 * process, we still need to clean up.
3289	 */
3290	if (ip_mrouter)
3291	    return EINVAL;
3292
3293	X_ip_mrouter_done();
3294	ip_mcast_src = NULL;
3295	ip_mforward = NULL;
3296	ip_mrouter_done = NULL;
3297	ip_mrouter_get = NULL;
3298	ip_mrouter_set = NULL;
3299	ip_rsvp_force_done = NULL;
3300	ip_rsvp_vif = NULL;
3301	legal_vif_num = NULL;
3302	mrt_ioctl = NULL;
3303	rsvp_input_p = NULL;
3304	VIF_LOCK_DESTROY();
3305	MFC_LOCK_DESTROY();
3306	mtx_destroy(&mrouter_mtx);
3307	break;
3308    default:
3309	return EOPNOTSUPP;
3310    }
3311    return 0;
3312}
3313
3314static moduledata_t ip_mroutemod = {
3315    "ip_mroute",
3316    ip_mroute_modevent,
3317    0
3318};
3319DECLARE_MODULE(ip_mroute, ip_mroutemod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3320