/* prune.c revision 1.9 */
1/*	$NetBSD: prune.c,v 1.3 1995/12/10 10:07:09 mycroft Exp $	*/
2
3/*
4 * The mrouted program is covered by the license in the accompanying file
5 * named "LICENSE".  Use of the mrouted program represents acceptance of
6 * the terms and conditions listed in that file.
7 *
8 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
9 * Leland Stanford Junior University.
10 */
11
12
13#include "defs.h"
14
15extern int cache_lifetime;
16extern int max_prune_lifetime;
17extern struct rtentry *routing_table;
18
19extern int phys_vif;
20
/*
 * Dither cache lifetime to obtain a value between x and 2*x, so that
 * cache entries created together do not all expire in the same timer
 * tick.  SYSV systems lack random(), so lrand48() is used there.
 */
#ifdef SYSV
#define CACHE_LIFETIME(x) ((x) + (lrand48() % (x)))
#else
#define CACHE_LIFETIME(x) ((x) + (random() % (x)))
#endif
29
/*
 * CHK_GS(x, y): set y to 1 if x is a power of two in the range 2..256,
 * otherwise set y to 0.
 * NOTE(review): not referenced anywhere in this portion of the file.
 */
#define CHK_GS(x, y) {	\
		switch(x) { \
			case 2:	\
			case 4:	\
			case 8:	\
			case 16: \
			case 32: \
			case 64: \
			case 128: \
			case 256: y = 1; \
				  break; \
			default:  y = 0; \
		} \
	}
44
45struct gtable *kernel_table;		/* ptr to list of kernel grp entries*/
46static struct gtable *kernel_no_route;	/* list of grp entries w/o routes   */
47struct gtable *gtp;			/* pointer for kernel rt entries    */
48unsigned int kroutes;			/* current number of cache entries  */
49
50/****************************************************************************
51                       Functions that are local to prune.c
52****************************************************************************/
53static void		prun_add_ttls(struct gtable *gt);
54static int		pruning_neighbor(vifi_t vifi, u_int32_t addr);
55static int		can_mtrace(vifi_t vifi, u_int32_t addr);
56static struct ptable *	find_prune_entry(u_int32_t vr, struct ptable *pt);
57static void		expire_prune(vifi_t vifi, struct gtable *gt);
58static void		send_prune(struct gtable *gt);
59static void		send_graft(struct gtable *gt);
60static void		send_graft_ack(u_int32_t src, u_int32_t dst,
61			    u_int32_t origin, u_int32_t grp);
62static void		update_kernel(struct gtable *g);
63static char *		scaletime(u_long t);
64
65/*
66 * Updates the ttl values for each vif.
67 */
68static void
69prun_add_ttls(struct gtable *gt)
70{
71    struct uvif *v;
72    vifi_t vifi;
73
74    for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
75	if (VIFM_ISSET(vifi, gt->gt_grpmems))
76	    gt->gt_ttls[vifi] = v->uv_threshold;
77	else
78	    gt->gt_ttls[vifi] = 0;
79    }
80}
81
/*
 * GET_SCOPE(gt): recompute the scoped-vif mask for a group entry.
 * Only administratively scoped groups (239.0.0.0/8) are considered;
 * for those, every vif whose boundary ACL covers the group address
 * gets its bit set in gt->gt_scope.
 */
#define GET_SCOPE(gt) { \
	register vifi_t _i; \
	if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
	    for (_i = 0; _i < numvifs; _i++) \
		if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
		    VIFM_SET(_i, (gt)->gt_scope); \
	}
92
93int
94scoped_addr(vifi_t vifi, u_int32_t addr)
95{
96    struct vif_acl *acl;
97
98    for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
99	if ((addr & acl->acl_mask) == acl->acl_addr)
100	    return 1;
101
102    return 0;
103}
104
105/*
106 * Determine if mcastgrp has a listener on vifi
107 */
108int
109grplst_mem(vifi_t vifi, u_int32_t mcastgrp)
110{
111    register struct listaddr *g;
112    register struct uvif *v;
113
114    v = &uvifs[vifi];
115
116    for (g = v->uv_groups; g != NULL; g = g->al_next)
117	if (mcastgrp == g->al_addr)
118	    return 1;
119
120    return 0;
121}
122
123/*
124 * Finds the group entry with the specified source and netmask.
125 * If netmask is 0, it uses the route's netmask.
126 *
127 * Returns TRUE if found a match, and the global variable gtp is left
128 * pointing to entry before the found entry.
129 * Returns FALSE if no exact match found, gtp is left pointing to before
130 * the entry in question belongs, or is NULL if the it belongs at the
131 * head of the list.
132 */
int
find_src_grp(u_int32_t src, u_int32_t mask, u_int32_t grp)
{
    struct gtable *gt;

    gtp = NULL;
    gt = kernel_table;
    while (gt != NULL) {
	/*
	 * Exact match: with a mask, require origin and mask equality;
	 * with mask == 0, match if src falls inside the entry's
	 * origin subnet.
	 */
	if (grp == gt->gt_mcastgrp &&
	    (mask ? (gt->gt_route->rt_origin == src &&
		     gt->gt_route->rt_originmask == mask) :
		    ((src & gt->gt_route->rt_originmask) ==
		     gt->gt_route->rt_origin)))
	    return TRUE;
	/*
	 * No match yet: keep walking while the sought key still sorts
	 * after the current entry, remembering the predecessor in the
	 * global gtp so a caller can insert after it.
	 * NOTE(review): the sort order (group desc, then mask desc,
	 * then origin asc in host byte order) is inferred from these
	 * comparisons; confirm against the insertion code.
	 */
	if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
	    (grp == gt->gt_mcastgrp &&
	     (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
	      (mask == gt->gt_route->rt_originmask &&
	       (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
	    gtp = gt;
	    gt = gt->gt_gnext;
	}
	else break;
    }
    return FALSE;
}
159
160/*
161 * Check if the neighbor supports pruning
162 */
163static int
164pruning_neighbor(vifi_t vifi, u_int32_t addr)
165{
166    struct listaddr *n = neighbor_info(vifi, addr);
167    int vers;
168
169    if (n == NULL)
170	return 0;
171
172    if (n->al_flags & NF_PRUNE)
173	return 1;
174
175    /*
176     * Versions from 3.0 to 3.4 relied on the version number to identify
177     * that they could handle pruning.
178     */
179    vers = NBR_VERS(n);
180    return (vers >= 0x0300 && vers <= 0x0304);
181}
182
183/*
184 * Can the neighbor in question handle multicast traceroute?
185 */
186static int
187can_mtrace(vifi_t vifi, u_int32_t addr)
188{
189    struct listaddr *n = neighbor_info(vifi, addr);
190    int vers;
191
192    if (n == NULL)
193	return 0;
194
195    if (n->al_flags & NF_MTRACE)
196	return 1;
197
198    /*
199     * Versions 3.3 and 3.4 relied on the version number to identify
200     * that they could handle traceroute.
201     */
202    vers = NBR_VERS(n);
203    return (vers >= 0x0303 && vers <= 0x0304);
204}
205
206/*
207 * Returns the prune entry of the router, or NULL if none exists
208 */
209static struct ptable *
210find_prune_entry(u_int32_t vr, struct ptable *pt)
211{
212    while (pt) {
213	if (pt->pt_router == vr)
214	    return pt;
215	pt = pt->pt_next;
216    }
217
218    return NULL;
219}
220
221/*
222 * Send a prune message to the dominant router for
223 * this source.
224 *
225 * Record an entry that a prune was sent for this group
226 */
static void
send_prune(struct gtable *gt)
{
    struct ptable *pt;
    char *p;
    int i;
    int datalen;
    u_int32_t src;
    u_int32_t dst;
    u_int32_t tmp;

    /* Don't process any prunes if router is not pruning */
    if (pruning == 0)
	return;

    /* Can't process a prune if we don't have an associated route */
    if (gt->gt_route == NULL)
	return;

    /* Don't send a prune to a non-pruning router */
    if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
	return;

    /*
     * sends a prune message to the router upstream.
     */
    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
    dst = gt->gt_route->rt_gateway;

    /* build payload past the IP + IGMP headers in the shared send buffer */
    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
    datalen = 0;

    /*
     * determine prune lifetime: the minimum of this entry's remaining
     * lifetime and the lifetimes of all prunes received downstream,
     * so our prune never outlives what downstream routers asked for.
     */
    gt->gt_prsent_timer = gt->gt_timer;
    for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
	if (pt->pt_timer < gt->gt_prsent_timer)
	    gt->gt_prsent_timer = pt->pt_timer;

    /*
     * If we have a graft pending, cancel graft retransmission
     */
    gt->gt_grftsnt = 0;

    /* payload: source address, group address, lifetime (network order),
     * copied byte-by-byte since p may be unaligned */
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
    tmp = htonl(gt->gt_prsent_timer);
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(tmp))[i];
    datalen += 12;

    send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
	      htonl(MROUTED_LEVEL), datalen);

    log(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
      inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
      inet_fmt(gt->gt_mcastgrp, s2),
      gt->gt_prsent_timer, gt->gt_route->rt_parent,
      inet_fmt(gt->gt_route->rt_gateway, s3));
}
290
291/*
292 * a prune was sent upstream
293 * so, a graft has to be sent to annul the prune
294 * set up a graft timer so that if an ack is not
295 * heard within that time, another graft request
296 * is sent out.
297 */
static void
send_graft(struct gtable *gt)
{
    register char *p;
    register int i;
    int datalen;
    u_int32_t src;
    u_int32_t dst;

    /* Can't send a graft without an associated route */
    if (gt->gt_route == NULL)
	return;

    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
    dst = gt->gt_route->rt_gateway;

    /* build payload past the IP + IGMP headers in the shared send buffer */
    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
    datalen = 0;

    /* payload: source address then group address, byte-by-byte copy
     * since p may be unaligned */
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
    datalen += 8;

    /* NOTE(review): datalen is always 8 here, so this test is
     * always true; kept as-is. */
    if (datalen != 0) {
	send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
		  htonl(MROUTED_LEVEL), datalen);
    }
    log(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
	inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
	inet_fmt(gt->gt_mcastgrp, s2),
	inet_fmt(gt->gt_route->rt_gateway, s3), gt->gt_route->rt_parent);
}
332
333/*
334 * Send an ack that a graft was received
335 */
336static void
337send_graft_ack(u_int32_t src, u_int32_t dst, u_int32_t origin, u_int32_t grp)
338{
339    register char *p;
340    register int i;
341    int datalen;
342
343    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
344    datalen = 0;
345
346    for (i = 0; i < 4; i++)
347	*p++ = ((char *)&(origin))[i];
348    for (i = 0; i < 4; i++)
349	*p++ = ((char *)&(grp))[i];
350    datalen += 8;
351
352    send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
353	      htonl(MROUTED_LEVEL), datalen);
354
355    log(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
356	inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3));
357}
358
359/*
360 * Update the kernel cache with all the routes hanging off the group entry
361 */
362static void
363update_kernel(struct gtable *g)
364{
365    struct stable *st;
366
367    for (st = g->gt_srctbl; st; st = st->st_next)
368	k_add_rg(st->st_origin, g);
369}
370
371/****************************************************************************
372                          Functions that are used externally
373****************************************************************************/
374
375#ifdef SNMP
376#include <sys/types.h>
377#include "snmp.h"
378
379/*
380 * Find a specific group entry in the group table
381 */
382struct gtable *
383find_grp(u_long grp)
384{
385   struct gtable *gt;
386
387   for (gt = kernel_table; gt; gt = gt->gt_gnext) {
388      if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
389	 break;
390      if (gt->gt_mcastgrp == grp)
391         return gt;
392   }
393   return NULL;
394}
395
396/*
397 * Given a group entry and source, find the corresponding source table
398 * entry
399 */
400struct stable *
401find_grp_src(struct gtable *gt, u_long src)
402{
403   struct stable *st;
404   u_long grp = gt->gt_mcastgrp;
405   struct gtable *gtcurr;
406
407   for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
408      for (st = gtcurr->gt_srctbl; st; st = st->st_next)
409	 if (st->st_origin == src)
410	    return st;
411   }
412   return NULL;
413}
414
415/*
416 * Find next entry > specification
417 * gtpp: ordered by group
418 * stpp: ordered by source
419 */
420int
421next_grp_src_mask(struct gtable *gtpp, struct gtable *stpp, u_long grp,
422    u_long src, u_long mask)
423{
424   struct gtable *gt, *gbest = NULL;
425   struct stable *st, *sbest = NULL;
426
427   /* Find first group entry >= grp spec */
428   (*gtpp) = kernel_table;
429   while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
430      (*gtpp)=(*gtpp)->gt_gnext;
431   if (!(*gtpp))
432      return 0; /* no more groups */
433
434   for (gt = kernel_table; gt; gt=gt->gt_gnext) {
435      /* Since grps are ordered, we can stop when group changes from gbest */
436      if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
437         break;
438      for (st = gt->gt_srctbl; st; st=st->st_next) {
439
440         /* Among those entries > spec, find "lowest" one */
441         if (((ntohl(gt->gt_mcastgrp)> ntohl(grp))
442           || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
443              && ntohl(st->st_origin)> ntohl(src))
444           || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
445              && ntohl(st->st_origin)==src && 0xFFFFFFFF>ntohl(mask)))
446          && (!gbest
447           || (ntohl(gt->gt_mcastgrp)< ntohl(gbest->gt_mcastgrp))
448           || (ntohl(gt->gt_mcastgrp)==ntohl(gbest->gt_mcastgrp)
449              && ntohl(st->st_origin)< ntohl(sbest->st_origin)))) {
450               gbest = gt;
451               sbest = st;
452         }
453      }
454   }
455   (*gtpp) = gbest;
456   (*stpp) = sbest;
457   return (*gtpp)!=0;
458}
459
460/*
461 * Ensure that sg contains current information for the given group,source.
462 * This is fetched from the kernel as a unit so that counts for the entry
463 * are consistent, i.e. packet and byte counts for the same entry are
464 * read at the same time.
465 */
466void
467refresh_sg(struct sioc_sg_req *sg, struct gtable *gt, struct gtable *st)
468{
469   static   int lastq = -1;
470
471   if (quantum != lastq || sg->src.s_addr!=st->st_origin
472    || sg->grp.s_addr!=gt->gt_mcastgrp) {
473       lastq = quantum;
474       sg->src.s_addr = st->st_origin;
475       sg->grp.s_addr = gt->gt_mcastgrp;
476       ioctl(udp_socket, SIOCGETSGCNT, (char *)sg);
477   }
478}
479
480/*
481 * Return pointer to a specific route entry.  This must be a separate
482 * function from find_route() which modifies rtp.
483 */
484struct rtentry *
485snmp_find_route(u_long src, u_long mask)
486{
487    register struct rtentry *rt;
488
489   for (rt = routing_table; rt; rt = rt->rt_next) {
490      if (src == rt->rt_origin && mask == rt->rt_originmask)
491         return rt;
492   }
493   return NULL;
494}
495
496/*
497 * Find next route entry > specification
498 */
499int
500next_route(struct rtentry **rtpp, u_long src, u_long mask)
501{
502   struct rtentry *rt, *rbest = NULL;
503
504   /* Among all entries > spec, find "lowest" one in order */
505   for (rt = routing_table; rt; rt=rt->rt_next) {
506      if ((ntohl(rt->rt_origin) > ntohl(src)
507          || (ntohl(rt->rt_origin) == ntohl(src)
508             && ntohl(rt->rt_originmask) > ntohl(mask)))
509       && (!rbest || (ntohl(rt->rt_origin) < ntohl(rbest->rt_origin))
510          || (ntohl(rt->rt_origin) == ntohl(rbest->rt_origin)
511             && ntohl(rt->rt_originmask) < ntohl(rbest->rt_originmask))))
512               rbest = rt;
513   }
514   (*rtpp) = rbest;
515   return (*rtpp)!=0;
516}
517
518/*
519 * Given a routing table entry, and a vifi, find the next vifi/entry
520 * vifi: vif at which to start looking
521 */
522int
523next_route_child(struct rtentry **rtpp, u_long src, u_long mask, vifi_t vifi)
524{
525   struct rtentry *rt;
526
527   /* Get (S,M) entry */
528   if (!((*rtpp) = snmp_find_route(src,mask)))
529      if (!next_route(rtpp, src, mask))
530         return 0;
531
532   /* Continue until we get one with a valid next vif */
533   do {
534      for (; (*rtpp)->rt_children && *vifi<numvifs; (*vifi)++)
535         if (VIFM_ISSET(*vifi, (*rtpp)->rt_children))
536            return 1;
537      *vifi = 0;
538   } while( next_route(rtpp, (*rtpp)->rt_origin, (*rtpp)->rt_originmask) );
539
540   return 0;
541}
542
543/*
544 * Given a routing table entry, and a vifi, find the next entry
545 * equal to or greater than those
546 * vifi: vif at which to start looking
547 */
548int
549next_child(struct gtable *gtpp, struct gtable *stpp, u_long grp,
550    u_long src, u_long mask, vifi_t *vifi)
551{
552   struct stable *st;
553
554   /* Get (G,S,M) entry */
555   if (mask!=0xFFFFFFFF
556    || !((*gtpp) = find_grp(grp))
557    || !((*stpp) = find_grp_src((*gtpp),src)))
558      if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))
559         return 0;
560
561   /* Continue until we get one with a valid next vif */
562   do {
563      for (; (*gtpp)->gt_route->rt_children && *vifi<numvifs; (*vifi)++)
564         if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
565            return 1;
566      *vifi = 0;
567   } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
568		(*stpp)->st_origin, 0xFFFFFFFF) );
569
570   return 0;
571}
572#endif /* SNMP */
573
574/*
575 * Initialize the kernel table structure
576 */
577void
578init_ktable(void)
579{
580    kernel_table	= NULL;
581    kernel_no_route	= NULL;
582    kroutes		= 0;
583}
584
585/*
586 * Add a new table entry for (origin, mcastgrp)
587 */
void
add_table_entry(u_int32_t origin, u_int32_t mcastgrp)
{
    struct rtentry *r;
    struct gtable *gt,**gtnp,*prev_gt;
    struct stable *st,**stnp;
    vifi_t i;

#ifdef DEBUG_MFC
    md_log(MD_MISS, origin, mcastgrp);
#endif

    /* r == NULL means we have no route for this origin (yet) */
    r = determine_route(origin);
    prev_gt = NULL;
    if (r == NULL) {
	/*
	 * Look for it on the no_route table; if it is found then
	 * it will be detected as a duplicate below.
	 */
	for (gt = kernel_no_route; gt; gt = gt->gt_next)
	    if (mcastgrp == gt->gt_mcastgrp &&
		gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
			break;
	gtnp = &kernel_no_route;
    } else {
	/* find the insertion point in the route's group list,
	 * which is kept sorted by group address */
	gtnp = &r->rt_groups;
	while ((gt = *gtnp) != NULL) {
	    if (gt->gt_mcastgrp >= mcastgrp)
		break;
	    gtnp = &gt->gt_next;
	    prev_gt = gt;
	}
    }

    /* No existing group entry: create and initialize one */
    if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
	gt = (struct gtable *)malloc(sizeof(struct gtable));
	if (gt == NULL)
	    /* NOTE(review): gt is used below, so log(LOG_ERR, ...)
	     * presumably does not return -- confirm in log() */
	    log(LOG_ERR, 0, "ran out of memory");

	gt->gt_mcastgrp	    = mcastgrp;
	gt->gt_timer	    = CACHE_LIFETIME(cache_lifetime);
	time(&gt->gt_ctime);
	gt->gt_grpmems	    = 0;
	gt->gt_scope	    = 0;
	gt->gt_prsent_timer = 0;
	gt->gt_grftsnt	    = 0;
	gt->gt_srctbl	    = NULL;
	gt->gt_pruntbl	    = NULL;
	gt->gt_route	    = r;
#ifdef RSRR
	gt->gt_rsrr_cache   = NULL;
#endif

	if (r != NULL) {
	    /* obtain the multicast group membership list */
	    for (i = 0; i < numvifs; i++) {
		/* non-leaf children always forward... */
		if (VIFM_ISSET(i, r->rt_children) &&
		    !(VIFM_ISSET(i, r->rt_leaves)))
		    VIFM_SET(i, gt->gt_grpmems);

		/* ...leaves forward only with a local member */
		if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
		    VIFM_SET(i, gt->gt_grpmems);
	    }
	    GET_SCOPE(gt);
	    /* scoped on the parent vif: block everything */
	    if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
		gt->gt_scope = -1;
	    gt->gt_grpmems &= ~gt->gt_scope;
	} else {
	    gt->gt_scope = -1;
	    gt->gt_grpmems = 0;
	}

	/* update ttls */
	prun_add_ttls(gt);

	/* link into the per-route (or no-route) list */
	gt->gt_next = *gtnp;
	*gtnp = gt;
	if (gt->gt_next)
	    gt->gt_next->gt_prev = gt;
	gt->gt_prev = prev_gt;

	if (r) {
	    /* link into the global group-sorted kernel table;
	     * find_src_grp leaves the predecessor in gtp */
	    if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
		struct gtable *g;

		g = gtp ? gtp->gt_gnext : kernel_table;
		log(LOG_WARNING, 0, "Entry for (%s %s) (rt:%x) exists (rt:%x)",
		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
		    inet_fmt(g->gt_mcastgrp, s2),
		    r, g->gt_route);
	    } else {
		if (gtp) {
		    gt->gt_gnext = gtp->gt_gnext;
		    gt->gt_gprev = gtp;
		    gtp->gt_gnext = gt;
		} else {
		    gt->gt_gnext = kernel_table;
		    gt->gt_gprev = NULL;
		    kernel_table = gt;
		}
		if (gt->gt_gnext)
		    gt->gt_gnext->gt_gprev = gt;
	    }
	} else {
	    gt->gt_gnext = gt->gt_gprev = NULL;
	}
    }

    /* Insert the source into the group's source list,
     * kept sorted by origin in host byte order */
    stnp = &gt->gt_srctbl;
    while ((st = *stnp) != NULL) {
	if (ntohl(st->st_origin) >= ntohl(origin))
	    break;
	stnp = &st->st_next;
    }

    if (st == NULL || st->st_origin != origin) {
	st = (struct stable *)malloc(sizeof(struct stable));
	if (st == NULL)
	    log(LOG_ERR, 0, "ran out of memory");

	st->st_origin = origin;
	st->st_pktcnt = 0;
	st->st_next = *stnp;
	*stnp = st;
    } else {
#ifdef DEBUG_MFC
	md_log(MD_DUPE, origin, mcastgrp);
#endif
	log(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
		inet_fmt(origin, s1), inet_fmt(mcastgrp, s2));
	/* XXX Doing this should cause no harm, and may ensure
	 * kernel<>mrouted synchronization */
	k_add_rg(origin, gt);
	return;
    }

    kroutes++;
    k_add_rg(origin, gt);

    log(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
	inet_fmt(origin, s1),
	inet_fmt(mcastgrp, s2),
	gt->gt_grpmems, r ? r->rt_parent : -1);

    /* If there are no leaf vifs
     * which have this group, then
     * mark this src-grp as a prune candidate.
     */
    if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
	send_prune(gt);
}
739
740/*
741 * An mrouter has gone down and come up on an interface
742 * Forward on that interface immediately
743 */
void
reset_neighbor_state(vifi_t vifi, u_int32_t addr)
{
    struct rtentry *r;
    struct gtable *g;
    struct ptable *pt, **ptnp;
    struct stable *st;

    for (g = kernel_table; g; g = g->gt_gnext) {
	r = g->gt_route;

	/*
	 * If neighbor was the parent, remove the prune sent state
	 * and all of the source cache info so that prunes get
	 * regenerated.
	 */
	if (vifi == r->rt_parent) {
	    if (addr == r->rt_gateway) {
		log(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
		    inet_fmt(g->gt_mcastgrp, s2));

		g->gt_prsent_timer = 0;
		g->gt_grftsnt = 0;
		/* pop every source off the list, purging each from the
		 * kernel cache (assignment in the condition is
		 * intentional) */
		while (st = g->gt_srctbl) {
		    g->gt_srctbl = st->st_next;
		    k_del_rg(st->st_origin, g);
		    kroutes--;
		    free(st);
		}
	    }
	} else {
	    /*
	     * Neighbor was not the parent, send grafts to join the groups
	     */
	    if (g->gt_prsent_timer) {
		g->gt_grftsnt = 1;
		send_graft(g);
		g->gt_prsent_timer = 0;
	    }

	    /*
	     * Remove any prunes that this router has sent us.
	     */
	    ptnp = &g->gt_pruntbl;
	    while ((pt = *ptnp) != NULL) {
		if (pt->pt_vifi == vifi && pt->pt_router == addr) {
		    *ptnp = pt->pt_next;
		    free(pt);
		} else
		    ptnp = &pt->pt_next;
	    }

	    /*
	     * And see if we want to forward again.
	     */
	    if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
		/* non-leaf child vifs always forward... */
		if (VIFM_ISSET(vifi, r->rt_children) &&
		    !(VIFM_ISSET(vifi, r->rt_leaves)))
		    VIFM_SET(vifi, g->gt_grpmems);

		/* ...leaf vifs forward only with a local member */
		if (VIFM_ISSET(vifi, r->rt_leaves) &&
		    grplst_mem(vifi, g->gt_mcastgrp))
		    VIFM_SET(vifi, g->gt_grpmems);

		g->gt_grpmems &= ~g->gt_scope;
		prun_add_ttls(g);

		/* Update kernel state */
		update_kernel(g);
#ifdef RSRR
		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g,1);
#endif /* RSRR */

		log(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
	    }
	}
    }
}
826
827/*
828 * Delete table entry from the kernel
829 * del_flag determines how many entries to delete
830 */
831void
832del_table_entry(struct rtentry *r, u_int32_t mcastgrp, u_int del_flag)
833{
834    struct gtable *g, *prev_g;
835    struct stable *st, *prev_st;
836    struct ptable *pt, *prev_pt;
837
838    if (del_flag == DEL_ALL_ROUTES) {
839	g = r->rt_groups;
840	while (g) {
841	    log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
842		inet_fmts(r->rt_origin, r->rt_originmask, s1),
843		inet_fmt(g->gt_mcastgrp, s2));
844	    st = g->gt_srctbl;
845	    while (st) {
846		if (k_del_rg(st->st_origin, g) < 0) {
847		    log(LOG_WARNING, errno,
848			"del_table_entry trying to delete (%s, %s)",
849			inet_fmt(st->st_origin, s1),
850			inet_fmt(g->gt_mcastgrp, s2));
851		}
852		kroutes--;
853		prev_st = st;
854		st = st->st_next;
855		free(prev_st);
856	    }
857	    g->gt_srctbl = NULL;
858
859	    pt = g->gt_pruntbl;
860	    while (pt) {
861		prev_pt = pt;
862		pt = pt->pt_next;
863		free(prev_pt);
864	    }
865	    g->gt_pruntbl = NULL;
866
867	    if (g->gt_gnext)
868		g->gt_gnext->gt_gprev = g->gt_gprev;
869	    if (g->gt_gprev)
870		g->gt_gprev->gt_gnext = g->gt_gnext;
871	    else
872		kernel_table = g->gt_gnext;
873
874#ifdef RSRR
875	    /* Send route change notification to reservation protocol. */
876	    rsrr_cache_send(g,0);
877	    rsrr_cache_clean(g);
878#endif /* RSRR */
879	    prev_g = g;
880	    g = g->gt_next;
881	    free(prev_g);
882	}
883	r->rt_groups = NULL;
884    }
885
886    /*
887     * Dummy routine - someday this may be needed, so it is just there
888     */
889    if (del_flag == DEL_RTE_GROUP) {
890	prev_g = (struct gtable *)&r->rt_groups;
891	for (g = r->rt_groups; g; g = g->gt_next) {
892	    if (g->gt_mcastgrp == mcastgrp) {
893		log(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
894		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
895		    inet_fmt(g->gt_mcastgrp, s2));
896		st = g->gt_srctbl;
897		while (st) {
898		    if (k_del_rg(st->st_origin, g) < 0) {
899			log(LOG_WARNING, errno,
900			    "del_table_entry trying to delete (%s, %s)",
901			    inet_fmt(st->st_origin, s1),
902			    inet_fmt(g->gt_mcastgrp, s2));
903		    }
904		    kroutes--;
905		    prev_st = st;
906		    st = st->st_next;
907		    free(prev_st);
908		}
909		g->gt_srctbl = NULL;
910
911		pt = g->gt_pruntbl;
912		while (pt) {
913		    prev_pt = pt;
914		    pt = pt->pt_next;
915		    free(prev_pt);
916		}
917		g->gt_pruntbl = NULL;
918
919		if (g->gt_gnext)
920		    g->gt_gnext->gt_gprev = g->gt_gprev;
921		if (g->gt_gprev)
922		    g->gt_gprev->gt_gnext = g->gt_gnext;
923		else
924		    kernel_table = g->gt_gnext;
925
926		if (prev_g != (struct gtable *)&r->rt_groups)
927		    g->gt_next->gt_prev = prev_g;
928		else
929		    g->gt_next->gt_prev = NULL;
930		prev_g->gt_next = g->gt_next;
931
932#ifdef RSRR
933		/* Send route change notification to reservation protocol. */
934		rsrr_cache_send(g,0);
935		rsrr_cache_clean(g);
936#endif /* RSRR */
937		free(g);
938		g = prev_g;
939	    } else {
940		prev_g = g;
941	    }
942	}
943    }
944}
945
946/*
947 * update kernel table entry when a route entry changes
948 */
void
update_table_entry(struct rtentry *r)
{
    struct gtable *g;
    struct ptable *pt, *prev_pt;
    vifi_t i;

    for (g = r->rt_groups; g; g = g->gt_next) {
	/* discard all received prunes; the route change invalidates them */
	pt = g->gt_pruntbl;
	while (pt) {
	    prev_pt = pt->pt_next;
	    free(pt);
	    pt = prev_pt;
	}
	g->gt_pruntbl = NULL;

	/* recompute the forwarding vif set from scratch */
	g->gt_grpmems = 0;

	/* obtain the multicast group membership list */
	for (i = 0; i < numvifs; i++) {
	    /* non-leaf child vifs always forward... */
	    if (VIFM_ISSET(i, r->rt_children) &&
		!(VIFM_ISSET(i, r->rt_leaves)))
		VIFM_SET(i, g->gt_grpmems);

	    /* ...leaf vifs forward only with a local member */
	    if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
		VIFM_SET(i, g->gt_grpmems);
	}
	/* scoped on the parent vif: block everything */
	if (VIFM_ISSET(r->rt_parent, g->gt_scope))
	    g->gt_scope = -1;
	g->gt_grpmems &= ~g->gt_scope;

	log(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
	    inet_fmts(r->rt_origin, r->rt_originmask, s1),
	    inet_fmt(g->gt_mcastgrp, s2),
	    g->gt_grpmems);

	/* now forwarding somewhere while a prune is outstanding:
	 * graft back upstream */
	if (g->gt_grpmems && g->gt_prsent_timer) {
	    g->gt_grftsnt = 1;
	    send_graft(g);
	    g->gt_prsent_timer = 0;
	}

	/* update ttls and add entry into kernel */
	prun_add_ttls(g);
	update_kernel(g);
#ifdef RSRR
	/* Send route change notification to reservation protocol. */
	rsrr_cache_send(g,1);
#endif /* RSRR */

	/* Check if we want to prune this group */
	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
	    g->gt_timer = CACHE_LIFETIME(cache_lifetime);
	    send_prune(g);
	}
    }
}
1006
1007/*
1008 * set the forwarding flag for all mcastgrps on this vifi
1009 */
void
update_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
{
    struct rtentry *r;
    struct gtable *g;

    log(LOG_DEBUG, 0, "group %s joined on vif %d",
	inet_fmt(mcastgrp, s1), vifi);

    for (g = kernel_table; g; g = g->gt_gnext) {
	/* table is ordered by group; past mcastgrp we are done */
	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
	    break;

	r = g->gt_route;
	if (g->gt_mcastgrp == mcastgrp &&
	    VIFM_ISSET(vifi, r->rt_children)) {

	    VIFM_SET(vifi, g->gt_grpmems);
	    /* scoped vifs never forward */
	    g->gt_grpmems &= ~g->gt_scope;
	    if (g->gt_grpmems == 0)
		continue;

	    prun_add_ttls(g);
	    log(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
		inet_fmts(r->rt_origin, r->rt_originmask, s1),
		inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);

	    update_kernel(g);
#ifdef RSRR
	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(g,1);
#endif /* RSRR */
	}
    }
}
1045
1046/*
1047 * reset forwarding flag for all mcastgrps on this vifi
1048 */
void
delete_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
{
    struct rtentry *r;
    struct gtable *g;

    log(LOG_DEBUG, 0, "group %s left on vif %d",
	inet_fmt(mcastgrp, s1), vifi);

    for (g = kernel_table; g; g = g->gt_gnext) {
	/* table is ordered by group; past mcastgrp we are done */
	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
	    break;

	if (g->gt_mcastgrp == mcastgrp) {
	    int stop_sending = 1;

	    r = g->gt_route;
	    /*
	     * If this is not a leaf, then we have router neighbors on this
	     * vif.  Only turn off forwarding if they have all pruned.
	     */
	    if (!VIFM_ISSET(vifi, r->rt_leaves)) {
		struct listaddr *vr;

		for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
		  if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
		      stop_sending = 0;
		      break;
		  }
	    }

	    if (stop_sending) {
		VIFM_CLR(vifi, g->gt_grpmems);
		log(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);

		prun_add_ttls(g);
		update_kernel(g);
#ifdef RSRR
		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g,1);
#endif /* RSRR */

		/*
		 * If there are no more members of this particular group,
		 *  send prune upstream
		 */
		if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
		    send_prune(g);
	    }
	}
    }
}
1103
1104/*
1105 * Takes the prune message received and then strips it to
1106 * determine the (src, grp) pair to be pruned.
1107 *
1108 * Adds the router to the (src, grp) entry then.
1109 *
1110 * Determines if further packets have to be sent down that vif
1111 *
1112 * Determines if a corresponding prune message has to be generated
1113 */
void
accept_prune(u_int32_t src, u_int32_t dst, char *p, int datalen)
{
    u_int32_t prun_src;		/* source/subnet being pruned */
    u_int32_t prun_grp;		/* group being pruned */
    u_int32_t prun_tmr;		/* requested prune lifetime, seconds */
    vifi_t vifi;
    int i;
    int stop_sending;
    struct rtentry *r;
    struct gtable *g;
    struct ptable *pt;
    struct listaddr *vr;

    /* Don't process any prunes if router is not pruning */
    if (pruning == 0)
	return;

    /* Prunes are only valid from a known neighbor on one of our vifs. */
    if ((vifi = find_vif(src, dst)) == NO_VIF) {
	log(LOG_INFO, 0,
	    "ignoring prune report from non-neighbor %s",
	    inet_fmt(src, s1));
	return;
    }

    /* Check if enough data is present */
    if (datalen < 12)
	{
	    log(LOG_WARNING, 0,
		"non-decipherable prune from %s",
		inet_fmt(src, s1));
	    return;
	}

    /*
     * Copy byte-by-byte: the packet buffer is not guaranteed to be
     * aligned for 32-bit access.
     */
    for (i = 0; i< 4; i++)
	((char *)&prun_src)[i] = *p++;
    for (i = 0; i< 4; i++)
	((char *)&prun_grp)[i] = *p++;
    for (i = 0; i< 4; i++)
	((char *)&prun_tmr)[i] = *p++;
    prun_tmr = ntohl(prun_tmr);

    log(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
	inet_fmt(src, s1), vifi,
	inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);

    /*
     * Find the subnet for the prune
     * (find_src_grp positions the global gtp just before the match).
     */
    if (find_src_grp(prun_src, 0, prun_grp)) {
	g = gtp ? gtp->gt_gnext : kernel_table;
	r = g->gt_route;

	/* A prune only makes sense from a downstream (child) router. */
	if (!VIFM_ISSET(vifi, r->rt_children)) {
	    log(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
		inet_fmt(src, s1), inet_fmt(prun_src, s2),
		inet_fmt(prun_grp, s3));
	    return;
	}
	if (VIFM_ISSET(vifi, g->gt_scope)) {
	    log(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
		inet_fmt(src, s1), inet_fmt(prun_src, s2),
		inet_fmt(prun_grp, s3));
	    return;
	}
	if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
	    /*
	     * If it's about to expire, then it's only still around because
	     * of timer granularity, so don't warn about it.
	     */
	    if (pt->pt_timer > 10) {
		log(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
		    "duplicate prune received on vif",
		    vifi, inet_fmt(src, s1), inet_fmt(prun_src, s2),
		    inet_fmt(prun_grp, s3), prun_tmr,
		    "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
	    }
	    pt->pt_timer = prun_tmr;
	} else {
	    /* allocate space for the prune structure */
	    pt = (struct ptable *)(malloc(sizeof(struct ptable)));
	    /*
	     * NOTE(review): execution continues after this log() call;
	     * presumably log(LOG_ERR, ...) exits the daemon (mrouted
	     * convention) -- confirm, otherwise this dereferences NULL.
	     */
	    if (pt == NULL)
	      log(LOG_ERR, 0, "pt: ran out of memory");

	    pt->pt_vifi = vifi;
	    pt->pt_router = src;
	    pt->pt_timer = prun_tmr;

	    /* prepend to this group's prune list */
	    pt->pt_next = g->gt_pruntbl;
	    g->gt_pruntbl = pt;
	}

	/* Refresh the group's lifetime; keep it at least as long as the prune. */
	g->gt_timer = CACHE_LIFETIME(cache_lifetime);
	if (g->gt_timer < prun_tmr)
	    g->gt_timer = prun_tmr;

	/*
	 * check if any more packets need to be sent on the
	 * vif which sent this message
	 * (stop only if every neighbor on this vif has sent us a prune)
	 */
	stop_sending = 1;
	for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
	  if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL)  {
	      stop_sending = 0;
	      break;
	  }

	/* ... and only if no local group member exists on that vif */
	if (stop_sending && !grplst_mem(vifi, prun_grp)) {
	    VIFM_CLR(vifi, g->gt_grpmems);
	    log(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
		inet_fmts(r->rt_origin, r->rt_originmask, s1),
		inet_fmt(g->gt_mcastgrp, s2), vifi, g->gt_grpmems);

	    prun_add_ttls(g);
	    update_kernel(g);
#ifdef RSRR
	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(g,1);
#endif /* RSRR */
	}

	/*
	 * check if all the child routers have expressed no interest
	 * in this group and if this group does not exist in the
	 * interface
	 * Send a prune message then upstream
	 */
	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
	    send_prune(g);
	}
    } else {
	/*
	 * There is no kernel entry for this group.  Therefore, we can
	 * simply ignore the prune, as we are not forwarding this traffic
	 * downstream.
	 */
	log(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
	    "prune message received with no kernel entry for",
	    inet_fmt(prun_src, s1), inet_fmt(prun_grp, s2),
	    prun_tmr, inet_fmt(src, s3));
	return;
    }
}
1258
1259/*
1260 * Checks if this mcastgrp is present in the kernel table
1261 * If so and if a prune was sent, it sends a graft upwards
1262 */
1263void
1264chkgrp_graft(vifi_t vifi, u_int32_t mcastgrp)
1265{
1266    struct rtentry *r;
1267    struct gtable *g;
1268
1269    for (g = kernel_table; g; g = g->gt_gnext) {
1270	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1271	    break;
1272
1273	r = g->gt_route;
1274	if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1275	    if (g->gt_prsent_timer) {
1276		VIFM_SET(vifi, g->gt_grpmems);
1277
1278		/*
1279		 * If the vif that was joined was a scoped vif,
1280		 * ignore it ; don't graft back
1281		 */
1282		g->gt_grpmems &= ~g->gt_scope;
1283		if (g->gt_grpmems == 0)
1284		    continue;
1285
1286		/* set the flag for graft retransmission */
1287		g->gt_grftsnt = 1;
1288
1289		/* send graft upwards */
1290		send_graft(g);
1291
1292		/* reset the prune timer and update cache timer*/
1293		g->gt_prsent_timer = 0;
1294		g->gt_timer = max_prune_lifetime;
1295
1296		log(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1297		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
1298		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1299
1300		prun_add_ttls(g);
1301		update_kernel(g);
1302#ifdef RSRR
1303		/* Send route change notification to reservation protocol. */
1304		rsrr_cache_send(g,1);
1305#endif /* RSRR */
1306	    }
1307    }
1308}
1309
1310/* determine the multicast group and src
1311 *
1312 * if it does, then determine if a prune was sent
1313 * upstream.
1314 * if prune sent upstream, send graft upstream and send
1315 * ack downstream.
1316 *
1317 * if no prune sent upstream, change the forwarding bit
1318 * for this interface and send ack downstream.
1319 *
1320 * if no entry exists for this group send ack downstream.
1321 */
void
accept_graft(u_int32_t src, u_int32_t dst, char *p, int datalen)
{
    vifi_t	vifi;
    u_int32_t	graft_src;	/* source/subnet being grafted */
    u_int32_t	graft_grp;	/* group being grafted */
    int		i;
    struct rtentry *r;
    struct gtable *g;
    struct ptable *pt, **ptnp;

    /* Grafts are only valid from a known neighbor on one of our vifs. */
    if ((vifi = find_vif(src, dst)) == NO_VIF) {
	log(LOG_INFO, 0,
	    "ignoring graft from non-neighbor %s",
	    inet_fmt(src, s1));
	return;
    }

    /* A graft carries a (source, group) pair: at least 8 bytes. */
    if (datalen < 8) {
	log(LOG_WARNING, 0,
	    "received non-decipherable graft from %s",
	    inet_fmt(src, s1));
	return;
    }

    /*
     * Copy byte-by-byte: the packet buffer is not guaranteed to be
     * aligned for 32-bit access.
     */
    for (i = 0; i< 4; i++)
	((char *)&graft_src)[i] = *p++;
    for (i = 0; i< 4; i++)
	((char *)&graft_grp)[i] = *p++;

    log(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
	inet_fmt(src, s1), vifi,
	inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3));

    /*
     * Find the subnet for the graft
     * (find_src_grp positions the global gtp just before the match).
     */
    if (find_src_grp(graft_src, 0, graft_grp)) {
	g = gtp ? gtp->gt_gnext : kernel_table;
	r = g->gt_route;

	if (VIFM_ISSET(vifi, g->gt_scope)) {
	    log(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
		inet_fmt(src, s1), inet_fmt(graft_src, s2),
		inet_fmt(graft_grp, s3));
	    return;
	}

	/*
	 * Walk the prune list with a pointer-to-pointer so the matching
	 * entry can be unlinked in place.
	 */
	ptnp = &g->gt_pruntbl;
	while ((pt = *ptnp) != NULL) {
	    if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
		*ptnp = pt->pt_next;
		free(pt);

		/* the vif is forwarding again */
		VIFM_SET(vifi, g->gt_grpmems);
		log(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);

		prun_add_ttls(g);
		update_kernel(g);
#ifdef RSRR
		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g,1);
#endif /* RSRR */
		break;
	    } else {
		ptnp = &pt->pt_next;
	    }
	}

	/* send ack downstream (even if we held no prune from this router) */
	send_graft_ack(dst, src, graft_src, graft_grp);
	g->gt_timer = max_prune_lifetime;

	/* If we had pruned upstream ourselves, graft back upstream too. */
	if (g->gt_prsent_timer) {
	    /* set the flag for graft retransmission */
	    g->gt_grftsnt = 1;

	    /* send graft upwards */
	    send_graft(g);

	    /* reset the prune sent timer */
	    g->gt_prsent_timer = 0;
	}
    } else {
	/*
	 * We have no state for the source and group in question.
	 * We can simply acknowledge the graft, since we know
	 * that we have no prune state, and grafts are requests
	 * to remove prune state.
	 */
	send_graft_ack(dst, src, graft_src, graft_grp);
	log(LOG_DEBUG, 0, "%s (%s %s) from %s",
	    "graft received with no kernel entry for",
	    inet_fmt(graft_src, s1), inet_fmt(graft_grp, s2),
	    inet_fmt(src, s3));
	return;
    }
}
1422
1423/*
1424 * find out which group is involved first of all
1425 * then determine if a graft was sent.
1426 * if no graft sent, ignore the message
1427 * if graft was sent and the ack is from the right
1428 * source, remove the graft timer so that we don't
1429 * have send a graft again
1430 */
1431void
1432accept_g_ack(u_int32_t src, u_int32_t dst, char *p, int datalen)
1433{
1434    struct gtable *g;
1435    vifi_t	vifi;
1436    u_int32_t	grft_src;
1437    u_int32_t	grft_grp;
1438    int		i;
1439
1440    if ((vifi = find_vif(src, dst)) == NO_VIF) {
1441	log(LOG_INFO, 0,
1442	    "ignoring graft ack from non-neighbor %s",
1443	    inet_fmt(src, s1));
1444	return;
1445    }
1446
1447    if (datalen < 0  || datalen > 8) {
1448	log(LOG_WARNING, 0,
1449	    "received non-decipherable graft ack from %s",
1450	    inet_fmt(src, s1));
1451	return;
1452    }
1453
1454    for (i = 0; i< 4; i++)
1455	((char *)&grft_src)[i] = *p++;
1456    for (i = 0; i< 4; i++)
1457	((char *)&grft_grp)[i] = *p++;
1458
1459    log(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1460	inet_fmt(src, s1), vifi,
1461	inet_fmt(grft_src, s2), inet_fmt(grft_grp, s3));
1462
1463    /*
1464     * Find the subnet for the graft ack
1465     */
1466    if (find_src_grp(grft_src, 0, grft_grp)) {
1467	g = gtp ? gtp->gt_gnext : kernel_table;
1468	g->gt_grftsnt = 0;
1469    } else {
1470	log(LOG_WARNING, 0, "%s (%s, %s) from %s",
1471	    "rcvd graft ack with no kernel entry for",
1472	    inet_fmt(grft_src, s1), inet_fmt(grft_grp, s2),
1473	    inet_fmt(src, s3));
1474	return;
1475    }
1476}
1477
1478
1479/*
1480 * free all prune entries and kernel routes
1481 * normally, this should inform the kernel that all of its routes
1482 * are going away, but this is only called by restart(), which is
1483 * about to call MRT_DONE which does that anyway.
1484 */
1485void
1486free_all_prunes(void)
1487{
1488    register struct rtentry *r;
1489    register struct gtable *g, *prev_g;
1490    register struct stable *s, *prev_s;
1491    register struct ptable *p, *prev_p;
1492
1493    for (r = routing_table; r; r = r->rt_next) {
1494	g = r->rt_groups;
1495	while (g) {
1496	    s = g->gt_srctbl;
1497	    while (s) {
1498		prev_s = s;
1499		s = s->st_next;
1500		free(prev_s);
1501	    }
1502
1503	    p = g->gt_pruntbl;
1504	    while (p) {
1505		prev_p = p;
1506		p = p->pt_next;
1507		free(prev_p);
1508	    }
1509
1510	    prev_g = g;
1511	    g = g->gt_next;
1512	    free(prev_g);
1513	}
1514	r->rt_groups = NULL;
1515    }
1516    kernel_table = NULL;
1517
1518    g = kernel_no_route;
1519    while (g) {
1520	if (g->gt_srctbl)
1521	    free(g->gt_srctbl);
1522
1523	prev_g = g;
1524	g = g->gt_next;
1525	free(prev_g);
1526    }
1527    kernel_no_route = NULL;
1528}
1529
1530/*
1531 * When a new route is created, search
1532 * a) The less-specific part of the routing table
1533 * b) The route-less kernel table
1534 * for sources that the new route might want to handle.
1535 *
1536 * "Inheriting" these sources might be cleanest, but simply deleting
1537 * them is easier, and letting the kernel re-request them.
1538 */
void
steal_sources(struct rtentry *rt)
{
    register struct rtentry *rp;
    register struct gtable *gt, **gtnp;
    register struct stable *st, **stnp;

    /*
     * Pass 1: scan the remainder of the routing table (routes after rt,
     * i.e. the less-specific ones) for entries whose subnet contains
     * rt's origin, and delete any cached sources the new route covers.
     */
    for (rp = rt->rt_next; rp; rp = rp->rt_next) {
	if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
	    log(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
		inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
		inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
	    for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
		/* pointer-to-pointer walk so entries can be unlinked in place */
		stnp = &gt->gt_srctbl;
		while ((st = *stnp) != NULL) {
		    if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
			log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
			    inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
			    inet_fmt(st->st_origin, s3),
			    inet_fmt(gt->gt_mcastgrp, s4),
			    inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
			/* remove the kernel (source, group) cache entry */
			if (k_del_rg(st->st_origin, gt) < 0) {
			    log(LOG_WARNING, errno, "%s (%s, %s)",
				"steal_sources trying to delete",
				inet_fmt(st->st_origin, s1),
				inet_fmt(gt->gt_mcastgrp, s2));
			}
			*stnp = st->st_next;
			kroutes--;
			free(st);
		    } else {
			stnp = &st->st_next;
		    }
		}
	    }
	}
    }

    /*
     * Pass 2: scan the route-less kernel entries; each holds at most
     * one source, so a match deletes the whole group entry.
     */
    gtnp = &kernel_no_route;
    while ((gt = *gtnp) != NULL) {
	if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
				    == rt->rt_origin)) {
	    log(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
		inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
		inet_fmt(gt->gt_srctbl->st_origin, s3),
		inet_fmt(gt->gt_mcastgrp, s4),
		"no_route table");
	    if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
		log(LOG_WARNING, errno, "%s (%s %s)",
		    "steal_sources trying to delete",
		    inet_fmt(gt->gt_srctbl->st_origin, s1),
		    inet_fmt(gt->gt_mcastgrp, s2));
	    }
	    kroutes--;
	    free(gt->gt_srctbl);
	    *gtnp = gt->gt_next;
	    if (gt->gt_next)
		gt->gt_next->gt_prev = gt->gt_prev;
	    free(gt);
	} else {
	    gtnp = &gt->gt_next;
	}
    }
}
1603
1604/*
1605 * Advance the timers on all the cache entries.
1606 * If there are any entries whose timers have expired,
1607 * remove these entries from the kernel cache.
1608 */
void
age_table_entry(void)
{
    struct rtentry *r;
    struct gtable *gt, **gtnptr;
    struct stable *st, **stnp;
    struct ptable *pt, **ptnp;
    struct sioc_sg_req sg_req;	/* kernel (source,group) packet-count query */

    log(LOG_DEBUG, 0, "ageing entries");

    gtnptr = &kernel_table;
    while ((gt = *gtnptr) != NULL) {
	r = gt->gt_route;

	/* advance the timer for the kernel entry */
	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

	/*
	 * decrement prune timer if need be
	 * (-1 flags "our upstream prune just expired"; acted on below)
	 */
	if (gt->gt_prsent_timer > 0) {
	    gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
	    if (gt->gt_prsent_timer <= 0) {
		log(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
		    inet_fmt(gt->gt_mcastgrp, s2));
		gt->gt_prsent_timer = -1;
	    }
	}

	/*
	 * retransmit graft if graft sent flag is still set
	 * (CHK_GS makes y true only when the incremented count hits a
	 * power of two, giving exponential backoff up to 256 rounds)
	 */
	if (gt->gt_grftsnt) {
	    register int y;
	    CHK_GS(gt->gt_grftsnt++, y);
	    if (y)
		send_graft(gt);
	}

	/*
	 * Age prunes
	 *
	 * If a prune expires, forward again on that vif.
	 */
	ptnp = &gt->gt_pruntbl;
	while ((pt = *ptnp) != NULL) {
	    if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
		log(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
		    inet_fmt(gt->gt_mcastgrp, s2),
		    inet_fmt(pt->pt_router, s3),
		    pt->pt_vifi);

		expire_prune(pt->pt_vifi, gt);

		/* remove the router's prune entry and await new one */
		*ptnp = pt->pt_next;
		free(pt);
	    } else {
		ptnp = &pt->pt_next;
	    }
	}

	/*
	 * If the cache entry has expired, delete source table entries for
	 * silent sources.  If there are no source entries left, and there
	 * are no downstream prunes, then the entry is deleted.
	 * Otherwise, the cache entry's timer is refreshed.
	 */
	if (gt->gt_timer <= 0) {
	    /* Check for traffic before deleting source entries */
	    sg_req.grp.s_addr = gt->gt_mcastgrp;
	    stnp = &gt->gt_srctbl;
	    while ((st = *stnp) != NULL) {
		sg_req.src.s_addr = st->st_origin;
		if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
		    log(LOG_WARNING, errno, "%s (%s %s)",
			"age_table_entry: SIOCGETSGCNT failing for",
			inet_fmt(st->st_origin, s1),
			inet_fmt(gt->gt_mcastgrp, s2));
		    /* Make sure it gets deleted below */
		    sg_req.pktcnt = st->st_pktcnt;
		}
		/* unchanged packet count => source was silent; drop it */
		if (sg_req.pktcnt == st->st_pktcnt) {
		    *stnp = st->st_next;
		    log(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
			inet_fmt(st->st_origin, s1),
			inet_fmt(gt->gt_mcastgrp, s2));
		    if (k_del_rg(st->st_origin, gt) < 0) {
			log(LOG_WARNING, errno,
			    "age_table_entry trying to delete (%s %s)",
			    inet_fmt(st->st_origin, s1),
			    inet_fmt(gt->gt_mcastgrp, s2));
		    }
		    kroutes--;
		    free(st);
		} else {
		    /* remember the count for the next aging round */
		    st->st_pktcnt = sg_req.pktcnt;
		    stnp = &st->st_next;
		}
	    }

	    /*
	     * Retain the group entry if we have downstream prunes or if
	     * there is at least one source in the list that still has
	     * traffic, or if our upstream prune timer is running.
	     */
	    if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
		gt->gt_prsent_timer > 0) {
		gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
		/*
		 * Upstream prune just expired (-1): re-prune if we still
		 * have no members, otherwise just clear the flag.
		 * (The else pairs with the inner if -- intentional.)
		 */
		if (gt->gt_prsent_timer == -1)
		    if (gt->gt_grpmems == 0)
			send_prune(gt);
		    else
			gt->gt_prsent_timer = 0;
		gtnptr = &gt->gt_gnext;
		continue;
	    }

	    log(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
		inet_fmts(r->rt_origin, r->rt_originmask, s1),
		inet_fmt(gt->gt_mcastgrp, s2));

	    /* unlink from the per-route list... */
	    if (gt->gt_prev)
		gt->gt_prev->gt_next = gt->gt_next;
	    else
		gt->gt_route->rt_groups = gt->gt_next;
	    if (gt->gt_next)
		gt->gt_next->gt_prev = gt->gt_prev;

	    /* ...and from the global kernel table */
	    if (gt->gt_gprev) {
		gt->gt_gprev->gt_gnext = gt->gt_gnext;
		gtnptr = &gt->gt_gprev->gt_gnext;
	    } else {
		kernel_table = gt->gt_gnext;
		gtnptr = &kernel_table;
	    }
	    if (gt->gt_gnext)
		gt->gt_gnext->gt_gprev = gt->gt_gprev;

#ifdef RSRR
	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(gt,0);
	    rsrr_cache_clean(gt);
#endif /* RSRR */
	    free((char *)gt);
	} else {
	    /*
	     * Entry still alive: handle an upstream prune expiry (-1)
	     * here too.  (The else pairs with the inner if -- intentional.)
	     */
	    if (gt->gt_prsent_timer == -1)
		if (gt->gt_grpmems == 0)
		    send_prune(gt);
		else
		    gt->gt_prsent_timer = 0;
	    gtnptr = &gt->gt_gnext;
	}
    }

    /*
     * When traversing the no_route table, the decision is much easier.
     * Just delete it if it has timed out.
     */
    gtnptr = &kernel_no_route;
    while ((gt = *gtnptr) != NULL) {
	/* advance the timer for the kernel entry */
	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

	if (gt->gt_timer < 0) {
	    if (gt->gt_srctbl) {
		if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
		    log(LOG_WARNING, errno, "%s (%s %s)",
			"age_table_entry trying to delete no-route",
			inet_fmt(gt->gt_srctbl->st_origin, s1),
			inet_fmt(gt->gt_mcastgrp, s2));
		}
		free(gt->gt_srctbl);
	    }
	    *gtnptr = gt->gt_next;
	    if (gt->gt_next)
		gt->gt_next->gt_prev = gt->gt_prev;

	    free((char *)gt);
	} else {
	    gtnptr = &gt->gt_next;
	}
    }
}
1792
1793/*
1794 * Modify the kernel to forward packets when one or multiple prunes that
1795 * were received on the vif given by vifi, for the group given by gt,
1796 * have expired.
1797 */
1798static void
1799expire_prune(vifi_t vifi, struct gtable *gt)
1800{
1801    /*
1802     * No need to send a graft, any prunes that we sent
1803     * will expire before any prunes that we have received.
1804     */
1805    if (gt->gt_prsent_timer > 0) {
1806        log(LOG_DEBUG, 0, "prune expired with %d left on %s",
1807		gt->gt_prsent_timer, "prsent_timer");
1808        gt->gt_prsent_timer = 0;
1809    }
1810
1811    /* modify the kernel entry to forward packets */
1812    if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1813        struct rtentry *rt = gt->gt_route;
1814        VIFM_SET(vifi, gt->gt_grpmems);
1815        log(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1816	inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1817	inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems, vifi);
1818
1819        prun_add_ttls(gt);
1820        update_kernel(gt);
1821#ifdef RSRR
1822        /* Send route change notification to reservation protocol. */
1823        rsrr_cache_send(gt,1);
1824#endif /* RSRR */
1825    }
1826}
1827
1828
1829static char *
1830scaletime(u_long t)
1831{
1832    static char buf1[5];
1833    static char buf2[5];
1834    static char *buf=buf1;
1835    char s;
1836    char *p;
1837
1838    p = buf;
1839    if (buf == buf1)
1840	buf = buf2;
1841    else
1842	buf = buf1;
1843
1844    if (t < 120) {
1845	s = 's';
1846    } else if (t < 3600) {
1847	t /= 60;
1848	s = 'm';
1849    } else if (t < 86400) {
1850	t /= 3600;
1851	s = 'h';
1852    } else if (t < 864000) {
1853	t /= 86400;
1854	s = 'd';
1855    } else {
1856	t /= 604800;
1857	s = 'w';
1858    }
1859    if (t > 999)
1860	return "*** ";
1861
1862    snprintf(p, 5, "%3d%c", (int)t, s);
1863
1864    return p;
1865}
1866
1867/*
1868 * Print the contents of the cache table on file 'fp2'.
1869 */
1870void
1871dump_cache(FILE *fp2)
1872{
1873    register struct rtentry *r;
1874    register struct gtable *gt;
1875    register struct stable *st;
1876    register vifi_t i;
1877    register time_t thyme = time(0);
1878
1879    fprintf(fp2,
1880	    "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1881    " Origin             Mcast-group     CTmr  Age Ptmr IVif Forwvifs\n");
1882
1883    for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1884	if (gt->gt_srctbl) {
1885	    fprintf(fp2, " %-18s %-15s %-4s %-4s    - -1\n",
1886		inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff, s1),
1887		inet_fmt(gt->gt_mcastgrp, s2), scaletime(gt->gt_timer),
1888		scaletime(thyme - gt->gt_ctime));
1889	    fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin, s1));
1890	}
1891    }
1892
1893    for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1894	r = gt->gt_route;
1895	fprintf(fp2, " %-18s %-15s",
1896	    inet_fmts(r->rt_origin, r->rt_originmask, s1),
1897	    inet_fmt(gt->gt_mcastgrp, s2));
1898
1899	fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1900
1901	fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1902			gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1903					      "   -");
1904
1905	fprintf(fp2, "%2u%c%c ", r->rt_parent,
1906	    gt->gt_prsent_timer ? 'P' : ' ',
1907	    VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1908
1909	for (i = 0; i < numvifs; ++i) {
1910	    if (VIFM_ISSET(i, gt->gt_grpmems))
1911		fprintf(fp2, " %u ", i);
1912	    else if (VIFM_ISSET(i, r->rt_children) &&
1913		     !VIFM_ISSET(i, r->rt_leaves))
1914		fprintf(fp2, " %u%c", i,
1915			VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1916	}
1917	fprintf(fp2, "\n");
1918	for (st = gt->gt_srctbl; st; st = st->st_next) {
1919	    fprintf(fp2, ">%s\n", inet_fmt(st->st_origin, s1));
1920	}
1921#ifdef DEBUG_PRUNES
1922	for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1923	    fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router, s1),
1924		pt->pt_vifi, pt->pt_timer);
1925	}
1926#endif
1927    }
1928}
1929
1930/*
1931 * Traceroute function which returns traceroute replies to the requesting
1932 * router. Also forwards the request to downstream routers.
1933 * NOTE: u_int no is narrowed to u_char
1934 */
void
accept_mtrace(u_int32_t src, u_int32_t dst, u_int32_t group,
    char *data, u_int no, int datalen)
{
    u_char type;
    struct rtentry *rt;
    struct gtable *gt;
    struct tr_query *qry;
    struct tr_resp  *resp;
    int vifi;
    char *p;
    int rcount;		/* number of response blocks already in the packet */
    int errcode = TR_NO_ERR;
    int resptype;
    struct timeval tp;
    struct sioc_vif_req v_req;
    struct sioc_sg_req sg_req;

    /* Remember qid across invocations */
    static u_int32_t oqid = 0;

    /* timestamp the request/response */
    gettimeofday(&tp, 0);

    /*
     * Check if it is a query or a response
     * (a bare query is QLEN bytes; each hop appends one RLEN response)
     */
    if (datalen == QLEN) {
	type = QUERY;
	log(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
	    inet_fmt(src, s1), inet_fmt(dst, s2));
    }
    else if ((datalen - QLEN) % RLEN == 0) {
	type = RESP;
	log(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
	    inet_fmt(src, s1), inet_fmt(dst, s2));
	if (IN_MULTICAST(ntohl(dst))) {
	    log(LOG_DEBUG, 0, "Dropping multicast response");
	    return;
	}
    }
    else {
	log(LOG_WARNING, 0, "%s from %s to %s",
	    "Non decipherable traceroute request received",
	    inet_fmt(src, s1), inet_fmt(dst, s2));
	return;
    }

    qry = (struct tr_query *)data;

    /*
     * if it is a packet with all reports filled, drop it
     */
    if ((rcount = (datalen - QLEN)/RLEN) == no) {
	log(LOG_DEBUG, 0, "packet with all reports filled in");
	return;
    }

    log(LOG_DEBUG, 0, "s: %s g: %s d: %s ", inet_fmt(qry->tr_src, s1),
	    inet_fmt(group, s2), inet_fmt(qry->tr_dst, s3));
    log(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
	    inet_fmt(qry->tr_raddr, s1));
    log(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);

    /* determine the routing table entry for this traceroute */
    rt = determine_route(qry->tr_src);
    if (rt) {
	log(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
		rt->rt_parent, inet_fmt(rt->rt_gateway, s1), rt->rt_metric);
	log(LOG_DEBUG, 0, "rt origin %s",
		inet_fmts(rt->rt_origin, rt->rt_originmask, s1));
    } else
	log(LOG_DEBUG, 0, "...no route");

    /*
     * Query type packet - check if rte exists
     * Check if the query destination is a vif connected to me.
     * and if so, whether I should start response back
     */
    if (type == QUERY) {
	if (oqid == qry->tr_qid) {
	    /*
	     * If the multicast router is a member of the group being
	     * queried, and the query is multicasted, then the router can
	     * receive multiple copies of the same query.  If we have already
	     * replied to this traceroute, just ignore it this time.
	     *
	     * This is not a total solution, but since if this fails you
	     * only get N copies, N <= the number of interfaces on the router,
	     * it is not fatal.
	     */
	    log(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
	    return;
	}

	if (rt == NULL) {
	    log(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
		   inet_fmt(qry->tr_src, s1));
	    if (IN_MULTICAST(ntohl(dst)))
		return;
	}
	vifi = find_vif(qry->tr_dst, 0);

	if (vifi == NO_VIF) {
	    /* The traceroute destination is not on one of my subnet vifs. */
	    log(LOG_DEBUG, 0, "Destination %s not an interface",
		   inet_fmt(qry->tr_dst, s1));
	    if (IN_MULTICAST(ntohl(dst)))
		return;
	    errcode = TR_WRONG_IF;
	} else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
	    log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
		   inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
	    if (IN_MULTICAST(ntohl(dst)))
		return;
	    errcode = TR_WRONG_IF;
	}
    }
    else {
	/*
	 * determine which interface the packet came in on
	 * RESP packets travel hop-by-hop so this either traversed
	 * a tunnel or came from a directly attached mrouter.
	 */
	if ((vifi = find_vif(src, dst)) == NO_VIF) {
	    log(LOG_DEBUG, 0, "Wrong interface for packet");
	    errcode = TR_WRONG_IF;
	}
    }

    /* Now that we've decided to send a response, save the qid */
    oqid = qry->tr_qid;

    log(LOG_DEBUG, 0, "Sending traceroute response");

    /* copy the packet to the sending buffer */
    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;

    bcopy(data, p, datalen);

    p += datalen;

    /*
     * If there is no room to insert our reply, coopt the previous hop
     * error indication to relay this fact.
     */
    if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
	resp = (struct tr_resp *)p - 1;
	resp->tr_rflags = TR_NO_SPACE;
	rt = NULL;
	goto sendit;
    }

    /*
     * fill in initial response fields
     */
    resp = (struct tr_resp *)p;
    bzero(resp, sizeof(struct tr_resp));
    datalen += RLEN;

    /*
     * NOTE(review): htonl() covers only the seconds half; the usec
     * fraction is added afterwards in host order, so on little-endian
     * hosts the NTP-style timestamp looks scrambled.  Later mrouted
     * releases apply htonl() to the whole 32-bit value -- confirm.
     */
    resp->tr_qarr    = htonl((tp.tv_sec + JAN_1970) << 16) +
				((tp.tv_usec >> 4) & 0xffff);

    resp->tr_rproto  = PROTO_DVMRP;
    if (errcode != TR_NO_ERR) {
	resp->tr_rflags	 = errcode;
	rt = NULL;	/* hack to enforce send straight to requestor */
	goto sendit;
    }
    resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
    resp->tr_fttl    = uvifs[vifi].uv_threshold;
    resp->tr_rflags  = TR_NO_ERR;

    /*
     * obtain # of packets out on interface
     */
    v_req.vifi = vifi;
    if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
	resp->tr_vifout  =  htonl(v_req.ocount);

    /*
     * fill in scoping & pruning information
     * (gt_next list is ordered by group, so stop at the first >= match)
     */
    if (rt)
	for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
	    if (gt->gt_mcastgrp >= group)
		break;
	}
    else
	gt = NULL;

    if (gt && gt->gt_mcastgrp == group) {
	sg_req.src.s_addr = qry->tr_src;
	sg_req.grp.s_addr = group;
	if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
	    resp->tr_pktcnt = htonl(sg_req.pktcnt);

	if (VIFM_ISSET(vifi, gt->gt_scope))
	    resp->tr_rflags = TR_SCOPED;
	else if (gt->gt_prsent_timer)
	    resp->tr_rflags = TR_PRUNED;
	else if (!VIFM_ISSET(vifi, gt->gt_grpmems))
	    if (VIFM_ISSET(vifi, rt->rt_children) &&
		!VIFM_ISSET(vifi, rt->rt_leaves))
		resp->tr_rflags = TR_OPRUNED;
	    else
		resp->tr_rflags = TR_NO_FWD;
    } else {
	if (scoped_addr(vifi, group))
	    resp->tr_rflags = TR_SCOPED;
	else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
	    resp->tr_rflags = TR_NO_FWD;
    }

    /*
     *  if no rte exists, set NO_RTE error
     */
    if (rt == NULL) {
	src = dst;		/* the dst address of resp. pkt */
	resp->tr_inaddr   = 0;
	resp->tr_rflags   = TR_NO_RTE;
	resp->tr_rmtaddr  = 0;
    } else {
	/* get # of packets in on interface */
	v_req.vifi = rt->rt_parent;
	if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
	    resp->tr_vifin = htonl(v_req.icount);

	MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
	src = uvifs[rt->rt_parent].uv_lcl_addr;
	resp->tr_inaddr = src;
	resp->tr_rmtaddr = rt->rt_gateway;
	if (!VIFM_ISSET(vifi, rt->rt_children)) {
	    log(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
		   inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
	    resp->tr_rflags = TR_WRONG_IF;
	}
	if (rt->rt_metric >= UNREACHABLE) {
	    resp->tr_rflags = TR_NO_RTE;
	    /* Hack to send reply directly */
	    rt = NULL;
	}
    }

sendit:
    /*
     * if metric is 1 or no. of reports is 1, send response to requestor
     * else send to upstream router.  If the upstream router can't handle
     * mtrace, set an error code and send to requestor anyway.
     */
    log(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);

    if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
	resptype = IGMP_MTRACE_REPLY;
	dst = qry->tr_raddr;
    } else
	if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
	    dst = qry->tr_raddr;
	    resp->tr_rflags = TR_OLD_ROUTER;
	    resptype = IGMP_MTRACE_REPLY;
	} else {
	    dst = rt->rt_gateway;
	    resptype = IGMP_MTRACE_QUERY;
	}

    if (IN_MULTICAST(ntohl(dst))) {
	/*
	 * Send the reply on a known multicast capable vif.
	 * If we don't have one, we can't source any multicasts anyway.
	 */
	if (phys_vif != -1) {
	    log(LOG_DEBUG, 0, "Sending reply to %s from %s",
		inet_fmt(dst, s1), inet_fmt(uvifs[phys_vif].uv_lcl_addr, s2));
	    k_set_ttl(qry->tr_rttl);
	    send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
		      resptype, no, group,
		      datalen);
	    k_set_ttl(1);
	} else
	    log(LOG_INFO, 0, "No enabled phyints -- %s",
			"dropping traceroute reply");
    } else {
	log(LOG_DEBUG, 0, "Sending %s to %s from %s",
	    resptype == IGMP_MTRACE_REPLY ?  "reply" : "request on",
	    inet_fmt(dst, s1), inet_fmt(src, s2));

	send_igmp(src, dst,
		  resptype, no, group,
		  datalen);
    }
    return;
}
2227