1/*	$NetBSD: prune.c,v 1.17 2006/05/25 01:41:13 christos Exp $	*/
2
3/*
4 * The mrouted program is covered by the license in the accompanying file
5 * named "LICENSE".  Use of the mrouted program represents acceptance of
6 * the terms and conditions listed in that file.
7 *
8 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
9 * Leland Stanford Junior University.
10 */
11
12
13#include "defs.h"
14
15extern int cache_lifetime;
16extern int max_prune_lifetime;
17extern struct rtentry *routing_table;
18
19extern int phys_vif;
20
21/*
22 * dither cache lifetime to obtain a value between x and 2*x
23 */
24#define CACHE_LIFETIME(x) ((x) + (arc4random() % (x)))
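/*
 * Illustrative note (not part of the original code): the dithering
 * keeps entries created at the same time from all expiring in the
 * same ageing pass.  For example, assuming cache_lifetime is 300
 * seconds:
 *
 *	arc4random() % 300   yields a value in [0, 299]
 *	CACHE_LIFETIME(300)  yields a value in [300, 599]
 *
 * so every entry lives at least x seconds and strictly less than 2*x.
 */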
25
26#define CHK_GS(x, y) {	\
27		switch(x) { \
28			case 2:	\
29			case 4:	\
30			case 8:	\
31			case 16: \
32			case 32: \
33			case 64: \
34			case 128: \
35			/*case 256:*/ y = 1; \
36				  break; \
37			default:  y = 0; \
38		} \
39	}
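/*
 * Illustrative sketch (it mirrors the caller in age_table_entry()
 * below and adds no new functionality): CHK_GS() sets y to 1 only
 * when x is a power of two between 2 and 128, which gives graft
 * retransmission an exponential backoff:
 */
#if 0
	int y;

	CHK_GS(gt->gt_grftsnt++, y);	/* y == 1 when the count hits 2, 4, 8, ... 128 */
	if (y)
		send_graft(gt);		/* resend the unacknowledged graft */
#endif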
40
41struct gtable *kernel_table;		/* ptr to list of kernel grp entries*/
42static struct gtable *kernel_no_route;	/* list of grp entries w/o routes   */
43struct gtable *gtp;			/* pointer for kernel rt entries    */
44unsigned int kroutes;			/* current number of cache entries  */
45
46/****************************************************************************
47                       Functions that are local to prune.c
48****************************************************************************/
49static void		prun_add_ttls(struct gtable *gt);
50static int		pruning_neighbor(vifi_t vifi, u_int32_t addr);
51static int		can_mtrace(vifi_t vifi, u_int32_t addr);
52static struct ptable *	find_prune_entry(u_int32_t vr, struct ptable *pt);
53static void		expire_prune(vifi_t vifi, struct gtable *gt);
54static void		send_prune(struct gtable *gt);
55static void		send_graft(struct gtable *gt);
56static void		send_graft_ack(u_int32_t src, u_int32_t dst,
57				       u_int32_t origin, u_int32_t grp);
58static void		update_kernel(struct gtable *g);
59static const char *	scaletime(u_long t);
60
61/*
62 * Updates the ttl values for each vif.
63 */
64static void
65prun_add_ttls(struct gtable *gt)
66{
67    struct uvif *v;
68    vifi_t vifi;
69
70    for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
71	if (VIFM_ISSET(vifi, gt->gt_grpmems))
72	    gt->gt_ttls[vifi] = v->uv_threshold;
73	else
74	    gt->gt_ttls[vifi] = 0;
75    }
76}
77
78/*
79 * checks for scoped multicast addresses
80 */
81#define GET_SCOPE(gt) { \
82	vifi_t _i; \
83	if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
84	    for (_i = 0; _i < numvifs; _i++) \
85		if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
86		    VIFM_SET(_i, (gt)->gt_scope); \
87	}
88
89int
90scoped_addr(vifi_t vifi, u_int32_t addr)
91{
92    struct vif_acl *acl;
93
94    for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
95	if ((addr & acl->acl_mask) == acl->acl_addr)
96	    return 1;
97
98    return 0;
99}
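/*
 * Illustrative example (the addresses are hypothetical, not from any
 * real configuration): with an ACL entry of acl_addr 239.192.0.0 and
 * acl_mask 255.252.0.0 on a vif, scoped_addr() returns 1 for
 * 239.193.1.2, since (239.193.1.2 & 255.252.0.0) == 239.192.0.0, and
 * 0 for 239.255.1.1.  GET_SCOPE() above only consults the ACLs for
 * groups in 239.0.0.0/8, the administratively scoped range.
 */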
100
101/*
102 * Determine if mcastgrp has a listener on vifi
103 */
104int
105grplst_mem(vifi_t vifi, u_int32_t mcastgrp)
106{
107    struct listaddr *g;
108    struct uvif *v;
109
110    v = &uvifs[vifi];
111
112    for (g = v->uv_groups; g != NULL; g = g->al_next)
113	if (mcastgrp == g->al_addr)
114	    return 1;
115
116    return 0;
117}
118
119/*
120 * Finds the group entry with the specified source and netmask.
121 * If netmask is 0, it uses the route's netmask.
122 *
 * Returns TRUE if a match is found, and the global variable gtp is left
 * pointing to the entry before the found entry.
 * Returns FALSE if no exact match is found; gtp is left pointing to the
 * entry before the place where the entry in question belongs, or is NULL
 * if it belongs at the head of the list.
128 */
129int
130find_src_grp(u_int32_t src, u_int32_t mask, u_int32_t grp)
131{
132    struct gtable *gt;
133
134    gtp = NULL;
135    gt = kernel_table;
136    while (gt != NULL) {
137	if (grp == gt->gt_mcastgrp &&
138	    (mask ? (gt->gt_route->rt_origin == src &&
139		     gt->gt_route->rt_originmask == mask) :
140		    ((src & gt->gt_route->rt_originmask) ==
141		     gt->gt_route->rt_origin)))
142	    return TRUE;
143	if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
144	    (grp == gt->gt_mcastgrp &&
145	     (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
146	      (mask == gt->gt_route->rt_originmask &&
147	       (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
148	    gtp = gt;
149	    gt = gt->gt_gnext;
150	}
151	else break;
152    }
153    return FALSE;
154}
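/*
 * Illustrative sketch (it mirrors how callers later in this file use
 * find_src_grp() and adds no new functionality): on success the
 * matching entry is the one after gtp, or the head of the list when
 * gtp is NULL, so callers recover it like this:
 */
#if 0
	if (find_src_grp(prun_src, 0, prun_grp)) {
		struct gtable *g = gtp ? gtp->gt_gnext : kernel_table;

		/* ... operate on the matching entry g ... */
	}
#endif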
155
156/*
157 * Check if the neighbor supports pruning
158 */
159static int
160pruning_neighbor(vifi_t vifi, u_int32_t addr)
161{
162    struct listaddr *n = neighbor_info(vifi, addr);
163    int vers;
164
165    if (n == NULL)
166	return 0;
167
168    if (n->al_flags & NF_PRUNE)
169	return 1;
170
171    /*
172     * Versions from 3.0 to 3.4 relied on the version number to identify
173     * that they could handle pruning.
174     */
175    vers = NBR_VERS(n);
176    return (vers >= 0x0300 && vers <= 0x0304);
177}
178
179/*
180 * Can the neighbor in question handle multicast traceroute?
181 */
182static int
183can_mtrace(vifi_t vifi, u_int32_t addr)
184{
185    struct listaddr *n = neighbor_info(vifi, addr);
186    int vers;
187
188    if (n == NULL)
189	return 0;
190
191    if (n->al_flags & NF_MTRACE)
192	return 1;
193
194    /*
195     * Versions 3.3 and 3.4 relied on the version number to identify
196     * that they could handle traceroute.
197     */
198    vers = NBR_VERS(n);
199    return (vers >= 0x0303 && vers <= 0x0304);
200}
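/*
 * Illustrative note (NBR_VERS() is defined elsewhere; this describes
 * the encoding it is assumed to use): the neighbor version is packed
 * as (major << 8) | minor, so the hex ranges above read naturally;
 * for example a mrouted 3.4 neighbor reports 0x0304, which passes
 * both the pruning check (0x0300..0x0304) and the traceroute check
 * (0x0303..0x0304).
 */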
201
202/*
203 * Returns the prune entry of the router, or NULL if none exists
204 */
205static struct ptable *
206find_prune_entry(u_int32_t vr, struct ptable *pt)
207{
208    while (pt) {
209	if (pt->pt_router == vr)
210	    return pt;
211	pt = pt->pt_next;
212    }
213
214    return NULL;
215}
216
217/*
218 * Send a prune message to the dominant router for
219 * this source.
220 *
221 * Record an entry that a prune was sent for this group
222 */
223static void
224send_prune(struct gtable *gt)
225{
226    struct ptable *pt;
227    char *p;
228    int i;
229    int datalen;
230    u_int32_t src;
231    u_int32_t dst;
232    u_int32_t tmp;
233
234    /* Don't process any prunes if router is not pruning */
235    if (pruning == 0)
236	return;
237
238    /* Can't process a prune if we don't have an associated route */
239    if (gt->gt_route == NULL)
240	return;
241
242    /* Don't send a prune to a non-pruning router */
243    if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
244	return;
245
246    /*
247     * sends a prune message to the router upstream.
248     */
249    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
250    dst = gt->gt_route->rt_gateway;
251
252    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
253    datalen = 0;
254
255    /*
256     * determine prune lifetime
257     */
258    gt->gt_prsent_timer = gt->gt_timer;
259    for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
260	if (pt->pt_timer < gt->gt_prsent_timer)
261	    gt->gt_prsent_timer = pt->pt_timer;
262
263    /*
264     * If we have a graft pending, cancel graft retransmission
265     */
266    gt->gt_grftsnt = 0;
267
268    for (i = 0; i < 4; i++)
269	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
270    for (i = 0; i < 4; i++)
271	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
272    tmp = htonl(gt->gt_prsent_timer);
273    for (i = 0; i < 4; i++)
274	*p++ = ((char *)&(tmp))[i];
275    datalen += 12;
276
277    send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
278	      htonl(MROUTED_LEVEL), datalen);
279
280    logit(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
281      inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask),
282      inet_fmt(gt->gt_mcastgrp),
283      gt->gt_prsent_timer, gt->gt_route->rt_parent,
284      inet_fmt(gt->gt_route->rt_gateway));
285}
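/*
 * Illustrative note (derived from the code above, not an independent
 * spec): the DVMRP prune that send_prune() builds carries 12 bytes of
 * data after the IGMP header:
 *
 *	bytes 0-3	source (rt_origin), network byte order
 *	bytes 4-7	multicast group (gt_mcastgrp), network byte order
 *	bytes 8-11	prune lifetime in seconds (gt_prsent_timer),
 *			network byte order
 *
 * accept_prune() below parses the same three fields back out.
 */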
286
/*
 * A prune was sent upstream, so a graft has to be sent to annul the
 * prune.  A graft retransmission flag is kept so that if an ack is
 * not heard in time, another graft request is sent out.
 */
294static void
295send_graft(struct gtable *gt)
296{
297    char *p;
298    int i;
299    int datalen;
300    u_int32_t src;
301    u_int32_t dst;
302
303    /* Can't send a graft without an associated route */
304    if (gt->gt_route == NULL)
305	return;
306
307    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
308    dst = gt->gt_route->rt_gateway;
309
310    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
311    datalen = 0;
312
313    for (i = 0; i < 4; i++)
314	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
315    for (i = 0; i < 4; i++)
316	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
317    datalen += 8;
318
319    if (datalen != 0) {
320	send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
321		  htonl(MROUTED_LEVEL), datalen);
322    }
323    logit(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
324	inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask),
325	inet_fmt(gt->gt_mcastgrp),
326	inet_fmt(gt->gt_route->rt_gateway),
327	gt->gt_route->rt_parent);
328}
329
330/*
331 * Send an ack that a graft was received
332 */
333static void
334send_graft_ack(u_int32_t src, u_int32_t dst, u_int32_t origin, u_int32_t grp)
335{
336    char *p;
337    int i;
338    int datalen;
339
340    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
341    datalen = 0;
342
343    for (i = 0; i < 4; i++)
344	*p++ = ((char *)&(origin))[i];
345    for (i = 0; i < 4; i++)
346	*p++ = ((char *)&(grp))[i];
347    datalen += 8;
348
349    send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
350	      htonl(MROUTED_LEVEL), datalen);
351
352    logit(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
353	inet_fmt(origin), inet_fmt(grp),
354	inet_fmt(dst));
355}
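/*
 * Illustrative note (derived from the two functions above): grafts
 * and graft acks share the same 8-byte payload: the source in bytes
 * 0-3 and the multicast group in bytes 4-7, both in network byte
 * order.  A graft ack simply echoes the pair from the graft it
 * acknowledges, which is why send_graft_ack() takes them as plain
 * arguments rather than a struct gtable.
 */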
356
357/*
358 * Update the kernel cache with all the routes hanging off the group entry
359 */
360static void
361update_kernel(struct gtable *g)
362{
363    struct stable *st;
364
365    for (st = g->gt_srctbl; st; st = st->st_next)
366	k_add_rg(st->st_origin, g);
367}
368
369/****************************************************************************
370                          Functions that are used externally
371****************************************************************************/
372
373#ifdef SNMP
374#include <sys/types.h>
375#include "snmp.h"
376
377/*
378 * Find a specific group entry in the group table
379 */
380struct gtable *
381find_grp(u_long grp)
382{
383   struct gtable *gt;
384
385   for (gt = kernel_table; gt; gt = gt->gt_gnext) {
386      if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
387      	 break;
388      if (gt->gt_mcastgrp == grp)
389         return gt;
390   }
391   return NULL;
392}
393
394/*
395 * Given a group entry and source, find the corresponding source table
396 * entry
397 */
398struct stable *
399find_grp_src(struct gtable *gt, u_long src)
400{
401   struct stable *st;
402   u_long grp = gt->gt_mcastgrp;
403   struct gtable *gtcurr;
404
405   for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
406      for (st = gtcurr->gt_srctbl; st; st = st->st_next)
407	 if (st->st_origin == src)
408	    return st;
409   }
410   return NULL;
411}
412
413/*
414 * Find next entry > specification
415 *
416 * gtpp: ordered by group
417 * stpp: ordered by source
418 */
419int
420next_grp_src_mask(struct gtable **gtpp, struct stable **stpp, u_long grp,
421		  u_long src, u_long mask)
422{
423   struct gtable *gt, *gbest = NULL;
424   struct stable *st, *sbest = NULL;
425
426   /* Find first group entry >= grp spec */
427   (*gtpp) = kernel_table;
428   while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
429      (*gtpp)=(*gtpp)->gt_gnext;
430   if (!(*gtpp))
431      return 0; /* no more groups */
432
433   for (gt = kernel_table; gt; gt=gt->gt_gnext) {
434      /* Since grps are ordered, we can stop when group changes from gbest */
435      if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
436         break;
437      for (st = gt->gt_srctbl; st; st=st->st_next) {
438
439         /* Among those entries > spec, find "lowest" one */
440         if (((ntohl(gt->gt_mcastgrp)> ntohl(grp))
441           || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
442              && ntohl(st->st_origin)> ntohl(src))
443           || (ntohl(gt->gt_mcastgrp)==ntohl(grp)
444              && ntohl(st->st_origin)==src && 0xFFFFFFFF>ntohl(mask)))
445          && (!gbest
446           || (ntohl(gt->gt_mcastgrp)< ntohl(gbest->gt_mcastgrp))
447           || (ntohl(gt->gt_mcastgrp)==ntohl(gbest->gt_mcastgrp)
448              && ntohl(st->st_origin)< ntohl(sbest->st_origin)))) {
449               gbest = gt;
450               sbest = st;
451         }
452      }
453   }
454   (*gtpp) = gbest;
455   (*stpp) = sbest;
456   return (*gtpp)!=0;
457}
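/*
 * Illustrative note (not part of the original code): this implements
 * SNMP GETNEXT-style semantics over the cache, ordered by group and
 * then by source.  For example, with entries (224.1.1.1, 10.0.0.1)
 * and (224.1.1.1, 10.0.0.9), a request for the entry following
 * (224.1.1.1, 10.0.0.1, 0xFFFFFFFF) returns (224.1.1.1, 10.0.0.9).
 */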
458
459/*
460 * Ensure that sg contains current information for the given group,source.
461 * This is fetched from the kernel as a unit so that counts for the entry
462 * are consistent, i.e. packet and byte counts for the same entry are
463 * read at the same time.
464 */
465void
466refresh_sg(struct sioc_sg_req *sg, struct gtable *gt, struct stable *st)
467{
468   static   int lastq = -1;
469
470   if (quantum != lastq || sg->src.s_addr!=st->st_origin
471    || sg->grp.s_addr!=gt->gt_mcastgrp) {
472       lastq = quantum;
473       sg->src.s_addr = st->st_origin;
474       sg->grp.s_addr = gt->gt_mcastgrp;
475       ioctl(igmp_socket, SIOCGETSGCNT, (char *)sg);
476   }
477}
478
479/*
480 * Return pointer to a specific route entry.  This must be a separate
481 * function from find_route() which modifies rtp.
482 */
483struct rtentry *
484snmp_find_route(u_long src, u_long mask)
485{
486    struct rtentry *rt;
487
488   for (rt = routing_table; rt; rt = rt->rt_next) {
489      if (src == rt->rt_origin && mask == rt->rt_originmask)
490         return rt;
491   }
492   return NULL;
493}
494
495/*
496 * Find next route entry > specification
497 */
498int
499next_route(struct rtentry **rtpp, u_long src, u_long mask)
500{
501   struct rtentry *rt, *rbest = NULL;
502
503   /* Among all entries > spec, find "lowest" one in order */
504   for (rt = routing_table; rt; rt=rt->rt_next) {
505      if ((ntohl(rt->rt_origin) > ntohl(src)
506          || (ntohl(rt->rt_origin) == ntohl(src)
507             && ntohl(rt->rt_originmask) > ntohl(mask)))
508       && (!rbest || (ntohl(rt->rt_origin) < ntohl(rbest->rt_origin))
509          || (ntohl(rt->rt_origin) == ntohl(rbest->rt_origin)
510             && ntohl(rt->rt_originmask) < ntohl(rbest->rt_originmask))))
511               rbest = rt;
512   }
513   (*rtpp) = rbest;
514   return (*rtpp)!=0;
515}
516
517/*
518 * Given a routing table entry, and a vifi, find the next vifi/entry
519 *
520 * vifi: vifi at which to start looking
521 */
522int
523next_route_child(struct rtentry **rtpp, u_long src, u_long mask, vifi_t *vifi)
524{
525   struct rtentry *rt;
526
527   /* Get (S,M) entry */
528   if (!((*rtpp) = snmp_find_route(src,mask)))
529      if (!next_route(rtpp, src, mask))
530         return 0;
531
532   /* Continue until we get one with a valid next vif */
533   do {
534      for (; (*rtpp)->rt_children && *vifi<numvifs; (*vifi)++)
535         if (VIFM_ISSET(*vifi, (*rtpp)->rt_children))
536            return 1;
537      *vifi = 0;
538   } while( next_route(rtpp, (*rtpp)->rt_origin, (*rtpp)->rt_originmask) );
539
540   return 0;
541}
542
543/*
544 * Given a routing table entry, and a vifi, find the next entry
545 * equal to or greater than those
546 *
547 * vifi: vifi at which to start looking
548 */
549int
550next_child(struct gtable **gtpp, struct stable **stpp, u_long grp, u_long src,
551	   u_long mask, vifi_t *vifi)
552{
553   struct stable *st;
554
555   /* Get (G,S,M) entry */
556   if (mask!=0xFFFFFFFF
557    || !((*gtpp) = find_grp(grp))
558    || !((*stpp) = find_grp_src((*gtpp),src)))
559      if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))
560         return 0;
561
562   /* Continue until we get one with a valid next vif */
563   do {
564      for (; (*gtpp)->gt_route->rt_children && *vifi<numvifs; (*vifi)++)
565         if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
566            return 1;
567      *vifi = 0;
568   } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
569		(*stpp)->st_origin, 0xFFFFFFFF) );
570
571   return 0;
572}
573#endif /* SNMP */
574
575/*
576 * Initialize the kernel table structure
577 */
578void
579init_ktable(void)
580{
581    kernel_table 	= NULL;
582    kernel_no_route	= NULL;
583    kroutes		= 0;
584}
585
586/*
587 * Add a new table entry for (origin, mcastgrp)
588 */
589void
590add_table_entry(u_int32_t origin, u_int32_t mcastgrp)
591{
592    struct rtentry *r;
593    struct gtable *gt,**gtnp,*prev_gt;
594    struct stable *st,**stnp;
595    vifi_t i;
596
597#ifdef DEBUG_MFC
598    md_log(MD_MISS, origin, mcastgrp);
599#endif
600
601    r = determine_route(origin);
602    prev_gt = NULL;
603    if (r == NULL) {
604	/*
605	 * Look for it on the no_route table; if it is found then
606	 * it will be detected as a duplicate below.
607	 */
608	for (gt = kernel_no_route; gt; gt = gt->gt_next)
609	    if (mcastgrp == gt->gt_mcastgrp &&
610		gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
611			break;
612	gtnp = &kernel_no_route;
613    } else {
614	gtnp = &r->rt_groups;
615	while ((gt = *gtnp) != NULL) {
616	    if (gt->gt_mcastgrp >= mcastgrp)
617		break;
618	    gtnp = &gt->gt_next;
619	    prev_gt = gt;
620	}
621    }
622
623    if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
624	gt = (struct gtable *)malloc(sizeof(struct gtable));
625	if (gt == NULL) {
626	    logit(LOG_ERR, 0, "ran out of memory");
627	    return;
628	}
629
630	gt->gt_mcastgrp	    = mcastgrp;
631	gt->gt_timer   	    = CACHE_LIFETIME(cache_lifetime);
632	time(&gt->gt_ctime);
633	gt->gt_grpmems	    = 0;
634	gt->gt_scope	    = 0;
635	gt->gt_prsent_timer = 0;
636	gt->gt_grftsnt	    = 0;
637	gt->gt_srctbl	    = NULL;
638	gt->gt_pruntbl	    = NULL;
639	gt->gt_route	    = r;
640#ifdef RSRR
641	gt->gt_rsrr_cache   = NULL;
642#endif
643
644	if (r != NULL) {
645	    /* obtain the multicast group membership list */
646	    for (i = 0; i < numvifs; i++) {
647		if (VIFM_ISSET(i, r->rt_children) &&
648		    !(VIFM_ISSET(i, r->rt_leaves)))
649		    VIFM_SET(i, gt->gt_grpmems);
650
651		if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
652		    VIFM_SET(i, gt->gt_grpmems);
653	    }
654	    GET_SCOPE(gt);
655	    if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
656		gt->gt_scope = -1;
657	    gt->gt_grpmems &= ~gt->gt_scope;
658	} else {
659	    gt->gt_scope = -1;
660	    gt->gt_grpmems = 0;
661	}
662
663	/* update ttls */
664	prun_add_ttls(gt);
665
666	gt->gt_next = *gtnp;
667	*gtnp = gt;
668	if (gt->gt_next)
669	    gt->gt_next->gt_prev = gt;
670	gt->gt_prev = prev_gt;
671
672	if (r) {
673	    if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
674		struct gtable *g;
675
676		g = gtp ? gtp->gt_gnext : kernel_table;
677		logit(LOG_WARNING, 0, "Entry for (%s %s) (rt:%p) exists (rt:%p)",
678		    inet_fmts(r->rt_origin, r->rt_originmask),
679		    inet_fmt(g->gt_mcastgrp),
680		    r, g->gt_route);
681	    } else {
682		if (gtp) {
683		    gt->gt_gnext = gtp->gt_gnext;
684		    gt->gt_gprev = gtp;
685		    gtp->gt_gnext = gt;
686		} else {
687		    gt->gt_gnext = kernel_table;
688		    gt->gt_gprev = NULL;
689		    kernel_table = gt;
690		}
691		if (gt->gt_gnext)
692		    gt->gt_gnext->gt_gprev = gt;
693	    }
694	} else {
695	    gt->gt_gnext = gt->gt_gprev = NULL;
696	}
697    }
698
699    stnp = &gt->gt_srctbl;
700    while ((st = *stnp) != NULL) {
701	if (ntohl(st->st_origin) >= ntohl(origin))
702	    break;
703	stnp = &st->st_next;
704    }
705
706    if (st == NULL || st->st_origin != origin) {
707	st = (struct stable *)malloc(sizeof(struct stable));
708	if (st == NULL)
709	    logit(LOG_ERR, 0, "ran out of memory");
710
711	st->st_origin = origin;
712	st->st_pktcnt = 0;
713	st->st_next = *stnp;
714	*stnp = st;
715    } else {
716#ifdef DEBUG_MFC
717	md_log(MD_DUPE, origin, mcastgrp);
718#endif
719	logit(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
720		inet_fmt(origin),
721		inet_fmt(mcastgrp));
722	/* XXX Doing this should cause no harm, and may ensure
723	 * kernel<>mrouted synchronization */
724	k_add_rg(origin, gt);
725	return;
726    }
727
728    kroutes++;
729    k_add_rg(origin, gt);
730
731    logit(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
732	inet_fmt(origin),
733	inet_fmt(mcastgrp),
734	gt->gt_grpmems, r ? r->rt_parent : -1);
735
    /*
     * If there are no leaf vifs which have this group, then mark
     * this src-grp as a prune candidate.
     */
740    if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
741	send_prune(gt);
742}
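/*
 * Illustrative walk-through (the addresses are hypothetical): when
 * the kernel reports a cache miss for a packet from 10.1.1.1 to
 * 224.2.2.2, add_table_entry(10.1.1.1, 224.2.2.2) looks up the route
 * covering 10.1.1.1, creates (or finds) the group entry for
 * 224.2.2.2 under that route, adds a source entry for 10.1.1.1,
 * installs the (S,G) pair in the kernel with k_add_rg(), and, if no
 * vif has members of the group and there is an upstream neighbor,
 * immediately sends a prune upstream.
 */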
743
/*
 * An mrouter has gone down and come up on an interface.
 * Forward on that interface immediately.
 */
748void
749reset_neighbor_state(vifi_t vifi, u_int32_t addr)
750{
751    struct rtentry *r;
752    struct gtable *g;
753    struct ptable *pt, **ptnp;
754    struct stable *st;
755
756    for (g = kernel_table; g; g = g->gt_gnext) {
757	r = g->gt_route;
758
759	/*
760	 * If neighbor was the parent, remove the prune sent state
761	 * and all of the source cache info so that prunes get
762	 * regenerated.
763	 */
764	if (vifi == r->rt_parent) {
765	    if (addr == r->rt_gateway) {
766		logit(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
767		    inet_fmts(r->rt_origin, r->rt_originmask),
768		    inet_fmt(g->gt_mcastgrp));
769
770		g->gt_prsent_timer = 0;
771		g->gt_grftsnt = 0;
772		while ((st = g->gt_srctbl) != NULL) {
773		    g->gt_srctbl = st->st_next;
774		    k_del_rg(st->st_origin, g);
775		    kroutes--;
776		    free(st);
777		}
778	    }
779	} else {
780	    /*
781	     * Neighbor was not the parent, send grafts to join the groups
782	     */
783	    if (g->gt_prsent_timer) {
784		g->gt_grftsnt = 1;
785		send_graft(g);
786		g->gt_prsent_timer = 0;
787	    }
788
789	    /*
790	     * Remove any prunes that this router has sent us.
791	     */
792	    ptnp = &g->gt_pruntbl;
793	    while ((pt = *ptnp) != NULL) {
794		if (pt->pt_vifi == vifi && pt->pt_router == addr) {
795		    *ptnp = pt->pt_next;
796		    free(pt);
797		} else
798		    ptnp = &pt->pt_next;
799	    }
800
801	    /*
802	     * And see if we want to forward again.
803	     */
804	    if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
805		if (VIFM_ISSET(vifi, r->rt_children) &&
806		    !(VIFM_ISSET(vifi, r->rt_leaves)))
807		    VIFM_SET(vifi, g->gt_grpmems);
808
809		if (VIFM_ISSET(vifi, r->rt_leaves) &&
810		    grplst_mem(vifi, g->gt_mcastgrp))
811		    VIFM_SET(vifi, g->gt_grpmems);
812
813		g->gt_grpmems &= ~g->gt_scope;
814		prun_add_ttls(g);
815
816		/* Update kernel state */
817		update_kernel(g);
818#ifdef RSRR
819		/* Send route change notification to reservation protocol. */
820		rsrr_cache_send(g,1);
821#endif /* RSRR */
822
823		logit(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
824		    inet_fmts(r->rt_origin, r->rt_originmask),
825		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
826	    }
827	}
828    }
829}
830
831/*
832 * Delete table entry from the kernel
833 * del_flag determines how many entries to delete
834 */
835void
836del_table_entry(struct rtentry *r, u_int32_t mcastgrp, u_int del_flag)
837{
838    struct gtable *g, *prev_g;
839    struct stable *st, *prev_st;
840    struct ptable *pt, *prev_pt;
841
842    if (del_flag == DEL_ALL_ROUTES) {
843	g = r->rt_groups;
844	while (g) {
845	    logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
846		inet_fmts(r->rt_origin, r->rt_originmask),
847		inet_fmt(g->gt_mcastgrp));
848	    st = g->gt_srctbl;
849	    while (st) {
850		if (k_del_rg(st->st_origin, g) < 0) {
851		    logit(LOG_WARNING, errno,
852			"del_table_entry trying to delete (%s, %s)",
853			inet_fmt(st->st_origin),
854			inet_fmt(g->gt_mcastgrp));
855		}
856		kroutes--;
857		prev_st = st;
858		st = st->st_next;
859		free(prev_st);
860	    }
861	    g->gt_srctbl = NULL;
862
863	    pt = g->gt_pruntbl;
864	    while (pt) {
865		prev_pt = pt;
866		pt = pt->pt_next;
867		free(prev_pt);
868	    }
869	    g->gt_pruntbl = NULL;
870
871	    if (g->gt_gnext)
872		g->gt_gnext->gt_gprev = g->gt_gprev;
873	    if (g->gt_gprev)
874		g->gt_gprev->gt_gnext = g->gt_gnext;
875	    else
876		kernel_table = g->gt_gnext;
877
878#ifdef RSRR
879	    /* Send route change notification to reservation protocol. */
880	    rsrr_cache_send(g,0);
881	    rsrr_cache_clean(g);
882#endif /* RSRR */
883	    prev_g = g;
884	    g = g->gt_next;
885	    free(prev_g);
886	}
887	r->rt_groups = NULL;
888    }
889
890    /*
891     * Dummy routine - someday this may be needed, so it is just there
892     */
893    if (del_flag == DEL_RTE_GROUP) {
894	prev_g = (struct gtable *)&r->rt_groups;
895	for (g = r->rt_groups; g; g = g->gt_next) {
896	    if (g->gt_mcastgrp == mcastgrp) {
897		logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
898		    inet_fmts(r->rt_origin, r->rt_originmask),
899		    inet_fmt(g->gt_mcastgrp));
900		st = g->gt_srctbl;
901		while (st) {
902		    if (k_del_rg(st->st_origin, g) < 0) {
903			logit(LOG_WARNING, errno,
904			    "del_table_entry trying to delete (%s, %s)",
905			    inet_fmt(st->st_origin),
906			    inet_fmt(g->gt_mcastgrp));
907		    }
908		    kroutes--;
909		    prev_st = st;
910		    st = st->st_next;
911		    free(prev_st);
912		}
913		g->gt_srctbl = NULL;
914
915		pt = g->gt_pruntbl;
916		while (pt) {
917		    prev_pt = pt;
918		    pt = pt->pt_next;
919		    free(prev_pt);
920		}
921		g->gt_pruntbl = NULL;
922
923		if (g->gt_gnext)
924		    g->gt_gnext->gt_gprev = g->gt_gprev;
925		if (g->gt_gprev)
926		    g->gt_gprev->gt_gnext = g->gt_gnext;
927		else
928		    kernel_table = g->gt_gnext;
929
930		if (prev_g != (struct gtable *)&r->rt_groups)
931		    g->gt_next->gt_prev = prev_g;
932		else
933		    g->gt_next->gt_prev = NULL;
934		prev_g->gt_next = g->gt_next;
935
936#ifdef RSRR
937		/* Send route change notification to reservation protocol. */
938		rsrr_cache_send(g,0);
939		rsrr_cache_clean(g);
940#endif /* RSRR */
941		free(g);
942		g = prev_g;
943	    } else {
944		prev_g = g;
945	    }
946	}
947    }
948}
949
950/*
951 * update kernel table entry when a route entry changes
952 */
953void
954update_table_entry(struct rtentry *r)
955{
956    struct gtable *g;
957    struct ptable *pt, *prev_pt;
958    vifi_t i;
959
960    for (g = r->rt_groups; g; g = g->gt_next) {
961	pt = g->gt_pruntbl;
962	while (pt) {
963	    prev_pt = pt->pt_next;
964	    free(pt);
965	    pt = prev_pt;
966	}
967	g->gt_pruntbl = NULL;
968
969	g->gt_grpmems = 0;
970
971	/* obtain the multicast group membership list */
972	for (i = 0; i < numvifs; i++) {
973	    if (VIFM_ISSET(i, r->rt_children) &&
974		!(VIFM_ISSET(i, r->rt_leaves)))
975		VIFM_SET(i, g->gt_grpmems);
976
977	    if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
978		VIFM_SET(i, g->gt_grpmems);
979	}
980	if (VIFM_ISSET(r->rt_parent, g->gt_scope))
981	    g->gt_scope = -1;
982	g->gt_grpmems &= ~g->gt_scope;
983
984	logit(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
985	    inet_fmts(r->rt_origin, r->rt_originmask),
986	    inet_fmt(g->gt_mcastgrp),
987	    g->gt_grpmems);
988
989	if (g->gt_grpmems && g->gt_prsent_timer) {
990	    g->gt_grftsnt = 1;
991	    send_graft(g);
992	    g->gt_prsent_timer = 0;
993	}
994
995	/* update ttls and add entry into kernel */
996	prun_add_ttls(g);
997	update_kernel(g);
998#ifdef RSRR
999	/* Send route change notification to reservation protocol. */
1000	rsrr_cache_send(g,1);
1001#endif /* RSRR */
1002
1003	/* Check if we want to prune this group */
1004	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1005	    g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1006	    send_prune(g);
1007	}
1008    }
1009}
1010
1011/*
1012 * set the forwarding flag for all mcastgrps on this vifi
1013 */
1014void
1015update_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
1016{
1017    struct rtentry *r;
1018    struct gtable *g;
1019
1020    logit(LOG_DEBUG, 0, "group %s joined on vif %d",
1021	inet_fmt(mcastgrp), vifi);
1022
1023    for (g = kernel_table; g; g = g->gt_gnext) {
1024	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1025	    break;
1026
1027	r = g->gt_route;
1028	if (g->gt_mcastgrp == mcastgrp &&
1029	    VIFM_ISSET(vifi, r->rt_children)) {
1030
1031	    VIFM_SET(vifi, g->gt_grpmems);
1032	    g->gt_grpmems &= ~g->gt_scope;
1033	    if (g->gt_grpmems == 0)
1034		continue;
1035
1036	    prun_add_ttls(g);
1037	    logit(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
1038		inet_fmts(r->rt_origin, r->rt_originmask),
1039		inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
1040
1041	    update_kernel(g);
1042#ifdef RSRR
1043	    /* Send route change notification to reservation protocol. */
1044	    rsrr_cache_send(g,1);
1045#endif /* RSRR */
1046	}
1047    }
1048}
1049
1050/*
1051 * reset forwarding flag for all mcastgrps on this vifi
1052 */
1053void
1054delete_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
1055{
1056    struct rtentry *r;
1057    struct gtable *g;
1058
1059    logit(LOG_DEBUG, 0, "group %s left on vif %d",
1060	inet_fmt(mcastgrp), vifi);
1061
1062    for (g = kernel_table; g; g = g->gt_gnext) {
1063	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1064	    break;
1065
1066	if (g->gt_mcastgrp == mcastgrp) {
1067	    int stop_sending = 1;
1068
1069	    r = g->gt_route;
1070	    /*
1071	     * If this is not a leaf, then we have router neighbors on this
1072	     * vif.  Only turn off forwarding if they have all pruned.
1073	     */
1074	    if (!VIFM_ISSET(vifi, r->rt_leaves)) {
1075		struct listaddr *vr;
1076
1077		for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1078		  if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
1079		      stop_sending = 0;
1080		      break;
1081		  }
1082	    }
1083
1084	    if (stop_sending) {
1085		VIFM_CLR(vifi, g->gt_grpmems);
1086		logit(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
1087		    inet_fmts(r->rt_origin, r->rt_originmask),
1088		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
1089
1090		prun_add_ttls(g);
1091		update_kernel(g);
1092#ifdef RSRR
1093		/* Send route change notification to reservation protocol. */
1094		rsrr_cache_send(g,1);
1095#endif /* RSRR */
1096
1097		/*
1098		 * If there are no more members of this particular group,
1099		 *  send prune upstream
1100		 */
1101		if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
1102		    send_prune(g);
1103	    }
1104	}
1105    }
1106}
1107
/*
 * Takes a received prune message and parses it to determine the
 * (src, grp) pair to be pruned, then records the sending router
 * against that (src, grp) entry.
 *
 * Determines whether further packets still have to be sent down
 * the vif the prune arrived on, and whether a corresponding prune
 * message has to be sent upstream.
 */
1118void
1119accept_prune(u_int32_t src, u_int32_t dst, char *p, int datalen)
1120{
1121    u_int32_t prun_src;
1122    u_int32_t prun_grp;
1123    u_int32_t prun_tmr;
1124    vifi_t vifi;
1125    int i;
1126    int stop_sending;
1127    struct rtentry *r;
1128    struct gtable *g;
1129    struct ptable *pt;
1130    struct listaddr *vr;
1131
1132    /* Don't process any prunes if router is not pruning */
1133    if (pruning == 0)
1134	return;
1135
1136    if ((vifi = find_vif(src, dst)) == NO_VIF) {
1137	logit(LOG_INFO, 0,
1138    	    "ignoring prune report from non-neighbor %s",
1139	    inet_fmt(src));
1140	return;
1141    }
1142
1143    /* Check if enough data is present */
    if (datalen < 12) {
	logit(LOG_WARNING, 0,
	    "non-decipherable prune from %s",
	    inet_fmt(src));
	return;
    }
1151
1152    for (i = 0; i< 4; i++)
1153	((char *)&prun_src)[i] = *p++;
1154    for (i = 0; i< 4; i++)
1155	((char *)&prun_grp)[i] = *p++;
1156    for (i = 0; i< 4; i++)
1157	((char *)&prun_tmr)[i] = *p++;
1158    prun_tmr = ntohl(prun_tmr);
1159
1160    logit(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
1161	inet_fmt(src), vifi,
1162	inet_fmt(prun_src), inet_fmt(prun_grp), prun_tmr);
1163
1164    /*
1165     * Find the subnet for the prune
1166     */
1167    if (find_src_grp(prun_src, 0, prun_grp)) {
1168	g = gtp ? gtp->gt_gnext : kernel_table;
1169    	r = g->gt_route;
1170
1171	if (!VIFM_ISSET(vifi, r->rt_children)) {
1172	    logit(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
1173		inet_fmt(src), inet_fmt(prun_src),
1174		inet_fmt(prun_grp));
1175	    return;
1176	}
1177	if (VIFM_ISSET(vifi, g->gt_scope)) {
1178	    logit(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
1179		inet_fmt(src), inet_fmt(prun_src),
1180		inet_fmt(prun_grp));
1181	    return;
1182	}
1183	if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
1184	    /*
1185	     * If it's about to expire, then it's only still around because
1186	     * of timer granularity, so don't warn about it.
1187	     */
1188	    if (pt->pt_timer > 10) {
1189		logit(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
1190		    "duplicate prune received on vif",
1191		    vifi, inet_fmt(src), inet_fmt(prun_src),
1192		    inet_fmt(prun_grp), prun_tmr,
1193		    "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
1194	    }
1195	    pt->pt_timer = prun_tmr;
1196	} else {
1197	    /* allocate space for the prune structure */
1198	    pt = (struct ptable *)(malloc(sizeof(struct ptable)));
1199	    if (pt == NULL) {
1200	      logit(LOG_ERR, 0, "pt: ran out of memory");
1201	      return;
1202	    }
1203
1204	    pt->pt_vifi = vifi;
1205	    pt->pt_router = src;
1206	    pt->pt_timer = prun_tmr;
1207
1208	    pt->pt_next = g->gt_pruntbl;
1209	    g->gt_pruntbl = pt;
1210	}
1211
1212	/* Refresh the group's lifetime */
1213	g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1214	if ((u_int32_t)g->gt_timer < prun_tmr)
1215	    g->gt_timer = prun_tmr;
1216
1217	/*
1218	 * check if any more packets need to be sent on the
1219	 * vif which sent this message
1220	 */
1221	stop_sending = 1;
1222	for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1223	  if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL)  {
1224	      stop_sending = 0;
1225	      break;
1226	  }
1227
1228	if (stop_sending && !grplst_mem(vifi, prun_grp)) {
1229	    VIFM_CLR(vifi, g->gt_grpmems);
1230	    logit(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
1231		inet_fmts(r->rt_origin, r->rt_originmask),
1232		inet_fmt(g->gt_mcastgrp), vifi, g->gt_grpmems);
1233
1234	    prun_add_ttls(g);
1235	    update_kernel(g);
1236#ifdef RSRR
1237	    /* Send route change notification to reservation protocol. */
1238	    rsrr_cache_send(g,1);
1239#endif /* RSRR */
1240	}
1241
1242	/*
1243	 * check if all the child routers have expressed no interest
1244	 * in this group and if this group does not exist in the
1245	 * interface
1246	 * Send a prune message then upstream
1247	 */
1248	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1249	    send_prune(g);
1250	}
1251    } else {
1252	/*
1253	 * There is no kernel entry for this group.  Therefore, we can
1254	 * simply ignore the prune, as we are not forwarding this traffic
1255	 * downstream.
1256	 */
1257	logit(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
1258	    "prune message received with no kernel entry for",
1259	    inet_fmt(prun_src), inet_fmt(prun_grp),
1260	    prun_tmr, inet_fmt(src));
1261	return;
1262    }
1263}
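/*
 * Illustrative note (not part of the original code): forwarding on
 * the vif that a prune arrived on is only turned off once every
 * neighboring router on that vif has a prune entry recorded here and
 * there are no local members of the group on that vif; a single
 * neighbor that has not pruned keeps the vif forwarding.
 */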
1264
1265/*
1266 * Checks if this mcastgrp is present in the kernel table
1267 * If so and if a prune was sent, it sends a graft upwards
1268 */
1269void
1270chkgrp_graft(vifi_t vifi, u_int32_t mcastgrp)
1271{
1272    struct rtentry *r;
1273    struct gtable *g;
1274
1275    for (g = kernel_table; g; g = g->gt_gnext) {
1276	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1277	    break;
1278
1279	r = g->gt_route;
1280	if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1281	    if (g->gt_prsent_timer) {
1282		VIFM_SET(vifi, g->gt_grpmems);
1283
		/*
		 * If the vif that was joined was a scoped vif,
		 * ignore it; don't graft back.
		 */
1288		g->gt_grpmems &= ~g->gt_scope;
1289		if (g->gt_grpmems == 0)
1290		    continue;
1291
1292		/* set the flag for graft retransmission */
1293		g->gt_grftsnt = 1;
1294
1295		/* send graft upwards */
1296		send_graft(g);
1297
1298		/* reset the prune timer and update cache timer*/
1299		g->gt_prsent_timer = 0;
1300		g->gt_timer = max_prune_lifetime;
1301
1302		logit(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1303		    inet_fmts(r->rt_origin, r->rt_originmask),
1304		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
1305
1306		prun_add_ttls(g);
1307		update_kernel(g);
1308#ifdef RSRR
1309		/* Send route change notification to reservation protocol. */
1310		rsrr_cache_send(g,1);
1311#endif /* RSRR */
1312	    }
1313    }
1314}
1315
/*
 * Determine the multicast group and source of the graft, and whether
 * an entry exists for them.
 *
 * If an entry exists, determine whether a prune was sent upstream.
 * If a prune was sent upstream, send a graft upstream and send an
 * ack downstream.
 *
 * If no prune was sent upstream, change the forwarding bit for this
 * interface and send an ack downstream.
 *
 * If no entry exists for this group, just send an ack downstream.
 */
1328void
1329accept_graft(u_int32_t src, u_int32_t dst, char *p, int datalen)
1330{
1331    vifi_t 	vifi;
1332    u_int32_t 	graft_src;
1333    u_int32_t	graft_grp;
1334    int 	i;
1335    struct rtentry *r;
1336    struct gtable *g;
1337    struct ptable *pt, **ptnp;
1338
1339    if ((vifi = find_vif(src, dst)) == NO_VIF) {
1340	logit(LOG_INFO, 0,
1341    	    "ignoring graft from non-neighbor %s",
1342	    inet_fmt(src));
1343	return;
1344    }
1345
1346    if (datalen < 8) {
1347	logit(LOG_WARNING, 0,
1348	    "received non-decipherable graft from %s",
1349	    inet_fmt(src));
1350	return;
1351    }
1352
1353    for (i = 0; i< 4; i++)
1354	((char *)&graft_src)[i] = *p++;
1355    for (i = 0; i< 4; i++)
1356	((char *)&graft_grp)[i] = *p++;
1357
1358    logit(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1359	inet_fmt(src), vifi,
1360	inet_fmt(graft_src), inet_fmt(graft_grp));
1361
1362    /*
1363     * Find the subnet for the graft
1364     */
1365    if (find_src_grp(graft_src, 0, graft_grp)) {
1366	g = gtp ? gtp->gt_gnext : kernel_table;
1367	r = g->gt_route;
1368
1369	if (VIFM_ISSET(vifi, g->gt_scope)) {
1370	    logit(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1371		inet_fmt(src), inet_fmt(graft_src),
1372		inet_fmt(graft_grp));
1373	    return;
1374	}
1375
1376	ptnp = &g->gt_pruntbl;
1377	while ((pt = *ptnp) != NULL) {
1378	    if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1379		*ptnp = pt->pt_next;
1380		free(pt);
1381
1382		VIFM_SET(vifi, g->gt_grpmems);
1383		logit(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1384		    inet_fmts(r->rt_origin, r->rt_originmask),
1385		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
1386
1387		prun_add_ttls(g);
1388		update_kernel(g);
1389#ifdef RSRR
1390		/* Send route change notification to reservation protocol. */
1391		rsrr_cache_send(g,1);
1392#endif /* RSRR */
1393		break;
1394	    } else {
1395		ptnp = &pt->pt_next;
1396	    }
1397	}
1398
1399	/* send ack downstream */
1400	send_graft_ack(dst, src, graft_src, graft_grp);
1401	g->gt_timer = max_prune_lifetime;
1402
1403	if (g->gt_prsent_timer) {
1404	    /* set the flag for graft retransmission */
1405	    g->gt_grftsnt = 1;
1406
1407	    /* send graft upwards */
1408	    send_graft(g);
1409
1410	    /* reset the prune sent timer */
1411	    g->gt_prsent_timer = 0;
1412	}
1413    } else {
1414	/*
1415	 * We have no state for the source and group in question.
1416	 * We can simply acknowledge the graft, since we know
1417	 * that we have no prune state, and grafts are requests
1418	 * to remove prune state.
1419	 */
1420	send_graft_ack(dst, src, graft_src, graft_grp);
1421	logit(LOG_DEBUG, 0, "%s (%s %s) from %s",
1422	    "graft received with no kernel entry for",
1423	    inet_fmt(graft_src), inet_fmt(graft_grp),
1424	    inet_fmt(src));
1425	return;
1426    }
1427}
1428
/*
 * First find out which (source, group) is involved, then determine
 * whether a graft was sent.
 * If no graft was sent, ignore the message.
 * If a graft was sent and the ack is from the right source, clear
 * the graft-sent flag so that we don't have to send a graft again.
 */
1437void
1438accept_g_ack(u_int32_t src, u_int32_t dst, char *p, int datalen)
1439{
1440    struct gtable *g;
1441    vifi_t 	vifi;
1442    u_int32_t 	grft_src;
1443    u_int32_t	grft_grp;
1444    int 	i;
1445
1446    if ((vifi = find_vif(src, dst)) == NO_VIF) {
1447	logit(LOG_INFO, 0,
1448    	    "ignoring graft ack from non-neighbor %s",
1449	    inet_fmt(src));
1450	return;
1451    }
1452
1453    if (datalen < 0  || datalen > 8) {
1454	logit(LOG_WARNING, 0,
1455	    "received non-decipherable graft ack from %s",
1456	    inet_fmt(src));
1457	return;
1458    }
1459
1460    for (i = 0; i< 4; i++)
1461	((char *)&grft_src)[i] = *p++;
1462    for (i = 0; i< 4; i++)
1463	((char *)&grft_grp)[i] = *p++;
1464
1465    logit(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1466	inet_fmt(src), vifi,
1467	inet_fmt(grft_src), inet_fmt(grft_grp));
1468
1469    /*
1470     * Find the subnet for the graft ack
1471     */
1472    if (find_src_grp(grft_src, 0, grft_grp)) {
1473	g = gtp ? gtp->gt_gnext : kernel_table;
1474	g->gt_grftsnt = 0;
1475    } else {
1476	logit(LOG_WARNING, 0, "%s (%s, %s) from %s",
1477	    "rcvd graft ack with no kernel entry for",
1478	    inet_fmt(grft_src), inet_fmt(grft_grp),
1479	    inet_fmt(src));
1480	return;
1481    }
1482}
1483
1484
/*
 * Free all prune entries and kernel routes.
 * Normally, this should inform the kernel that all of its routes
 * are going away, but this is only called by restart(), which is
 * about to call MRT_DONE, which does that anyway.
 */
1491void
1492free_all_prunes(void)
1493{
1494    struct rtentry *r;
1495    struct gtable *g, *prev_g;
1496    struct stable *s, *prev_s;
1497    struct ptable *p, *prev_p;
1498
1499    for (r = routing_table; r; r = r->rt_next) {
1500	g = r->rt_groups;
1501	while (g) {
1502	    s = g->gt_srctbl;
1503	    while (s) {
1504		prev_s = s;
1505		s = s->st_next;
1506		free(prev_s);
1507	    }
1508
1509	    p = g->gt_pruntbl;
1510	    while (p) {
1511		prev_p = p;
1512		p = p->pt_next;
1513		free(prev_p);
1514	    }
1515
1516	    prev_g = g;
1517	    g = g->gt_next;
1518	    free(prev_g);
1519	}
1520	r->rt_groups = NULL;
1521    }
1522    kernel_table = NULL;
1523
1524    g = kernel_no_route;
1525    while (g) {
1526	if (g->gt_srctbl)
1527	    free(g->gt_srctbl);
1528
1529	prev_g = g;
1530	g = g->gt_next;
1531	free(prev_g);
1532    }
1533    kernel_no_route = NULL;
1534}
1535
1536/*
1537 * When a new route is created, search
1538 * a) The less-specific part of the routing table
1539 * b) The route-less kernel table
1540 * for sources that the new route might want to handle.
1541 *
 * "Inheriting" these sources might be cleanest, but it is easier to
 * simply delete them and let the kernel re-request them.
1544 */
1545void
1546steal_sources(struct rtentry *rt)
1547{
1548    struct rtentry *rp;
1549    struct gtable *gt, **gtnp;
1550    struct stable *st, **stnp;
1551
1552    for (rp = rt->rt_next; rp; rp = rp->rt_next) {
1553	if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
1554	    logit(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
1555		inet_fmts(rt->rt_origin, rt->rt_originmask),
1556		inet_fmts(rp->rt_origin, rp->rt_originmask));
1557	    for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
1558		stnp = &gt->gt_srctbl;
1559		while ((st = *stnp) != NULL) {
1560		    if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
1561			logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1562			    inet_fmts(rt->rt_origin, rt->rt_originmask),
1563			    inet_fmt(st->st_origin),
1564			    inet_fmt(gt->gt_mcastgrp),
1565			    inet_fmts(rp->rt_origin, rp->rt_originmask));
1566			if (k_del_rg(st->st_origin, gt) < 0) {
1567			    logit(LOG_WARNING, errno, "%s (%s, %s)",
1568				"steal_sources trying to delete",
1569				inet_fmt(st->st_origin),
1570				inet_fmt(gt->gt_mcastgrp));
1571			}
1572			*stnp = st->st_next;
1573			kroutes--;
1574			free(st);
1575		    } else {
1576			stnp = &st->st_next;
1577		    }
1578		}
1579	    }
1580	}
1581    }
1582
1583    gtnp = &kernel_no_route;
1584    while ((gt = *gtnp) != NULL) {
1585	if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
1586				    == rt->rt_origin)) {
1587	    logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1588		inet_fmts(rt->rt_origin, rt->rt_originmask),
1589		inet_fmt(gt->gt_srctbl->st_origin),
1590		inet_fmt(gt->gt_mcastgrp),
1591		"no_route table");
1592	    if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1593		logit(LOG_WARNING, errno, "%s (%s %s)",
1594		    "steal_sources trying to delete",
1595		    inet_fmt(gt->gt_srctbl->st_origin),
1596		    inet_fmt(gt->gt_mcastgrp));
1597	    }
1598	    kroutes--;
1599	    free(gt->gt_srctbl);
1600	    *gtnp = gt->gt_next;
1601	    if (gt->gt_next)
1602		gt->gt_next->gt_prev = gt->gt_prev;
1603	    free(gt);
1604	} else {
1605	    gtnp = &gt->gt_next;
1606	}
1607    }
1608}
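/*
 * Illustrative example (hypothetical prefixes): if a new, more
 * specific route for 10.1.0.0/16 is created while cache entries
 * exist under the less specific 10.0.0.0/8 route, every (S,G) entry
 * whose source falls inside 10.1.0.0/16 is deleted from the kernel
 * here; the next packet from such a source causes a fresh cache miss
 * and the entry is rebuilt under the new route by add_table_entry().
 */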
1609
1610/*
1611 * Advance the timers on all the cache entries.
1612 * If there are any entries whose timers have expired,
1613 * remove these entries from the kernel cache.
1614 */
1615void
1616age_table_entry(void)
1617{
1618    struct rtentry *r;
1619    struct gtable *gt, **gtnptr;
1620    struct stable *st, **stnp;
1621    struct ptable *pt, **ptnp;
1622    struct sioc_sg_req sg_req;
1623
1624    logit(LOG_DEBUG, 0, "ageing entries");
1625
1626    gtnptr = &kernel_table;
1627    while ((gt = *gtnptr) != NULL) {
1628	r = gt->gt_route;
1629
1630	/* advance the timer for the kernel entry */
1631	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1632
1633	/* decrement prune timer if need be */
1634	if (gt->gt_prsent_timer > 0) {
1635	    gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
1636	    if (gt->gt_prsent_timer <= 0) {
1637		logit(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
1638		    inet_fmts(r->rt_origin, r->rt_originmask),
1639		    inet_fmt(gt->gt_mcastgrp));
1640		gt->gt_prsent_timer = -1;
1641	    }
1642	}
1643
1644	/* retransmit graft if graft sent flag is still set */
1645	if (gt->gt_grftsnt) {
1646	    int y;
1647	    CHK_GS(gt->gt_grftsnt++, y);
1648	    if (y)
1649		send_graft(gt);
1650	}
1651
1652	/*
1653	 * Age prunes
1654	 *
1655	 * If a prune expires, forward again on that vif.
1656	 */
1657	ptnp = &gt->gt_pruntbl;
1658	while ((pt = *ptnp) != NULL) {
1659	    if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
1660		logit(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
1661		    inet_fmts(r->rt_origin, r->rt_originmask),
1662		    inet_fmt(gt->gt_mcastgrp),
1663		    inet_fmt(pt->pt_router),
1664		    pt->pt_vifi);
1665
1666		expire_prune(pt->pt_vifi, gt);
1667
1668		/* remove the router's prune entry and await new one */
1669		*ptnp = pt->pt_next;
1670		free(pt);
1671	    } else {
1672		ptnp = &pt->pt_next;
1673	    }
1674	}
1675
1676	/*
1677	 * If the cache entry has expired, delete source table entries for
1678	 * silent sources.  If there are no source entries left, and there
1679	 * are no downstream prunes, then the entry is deleted.
1680	 * Otherwise, the cache entry's timer is refreshed.
1681	 */
1682	if (gt->gt_timer <= 0) {
1683	    /* Check for traffic before deleting source entries */
1684	    sg_req.grp.s_addr = gt->gt_mcastgrp;
1685	    stnp = &gt->gt_srctbl;
1686	    while ((st = *stnp) != NULL) {
1687		sg_req.src.s_addr = st->st_origin;
1688		if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
1689		    logit(LOG_WARNING, errno, "%s (%s %s)",
1690			"age_table_entry: SIOCGETSGCNT failing for",
1691			inet_fmt(st->st_origin),
1692			inet_fmt(gt->gt_mcastgrp));
1693		    /* Make sure it gets deleted below */
1694		    sg_req.pktcnt = st->st_pktcnt;
1695		}
1696		if (sg_req.pktcnt == st->st_pktcnt) {
1697		    *stnp = st->st_next;
1698		    logit(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
1699			inet_fmt(st->st_origin),
1700			inet_fmt(gt->gt_mcastgrp));
1701		    if (k_del_rg(st->st_origin, gt) < 0) {
1702			logit(LOG_WARNING, errno,
1703			    "age_table_entry trying to delete (%s %s)",
1704			    inet_fmt(st->st_origin),
1705			    inet_fmt(gt->gt_mcastgrp));
1706		    }
1707		    kroutes--;
1708		    free(st);
1709		} else {
1710		    st->st_pktcnt = sg_req.pktcnt;
1711		    stnp = &st->st_next;
1712		}
1713	    }
1714
1715	    /*
1716	     * Retain the group entry if we have downstream prunes or if
1717	     * there is at least one source in the list that still has
1718	     * traffic, or if our upstream prune timer is running.
1719	     */
1720	    if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
1721		gt->gt_prsent_timer > 0) {
1722		gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
1723		if (gt->gt_prsent_timer == -1) {
1724		    if (gt->gt_grpmems == 0)
1725			send_prune(gt);
1726		    else
1727			gt->gt_prsent_timer = 0;
1728		}
1729		gtnptr = &gt->gt_gnext;
1730		continue;
1731	    }
1732
1733	    logit(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
1734		inet_fmts(r->rt_origin, r->rt_originmask),
1735		inet_fmt(gt->gt_mcastgrp));
1736
1737	    if (gt->gt_prev)
1738		gt->gt_prev->gt_next = gt->gt_next;
1739	    else
1740		gt->gt_route->rt_groups = gt->gt_next;
1741	    if (gt->gt_next)
1742		gt->gt_next->gt_prev = gt->gt_prev;
1743
1744	    if (gt->gt_gprev) {
1745		gt->gt_gprev->gt_gnext = gt->gt_gnext;
1746		gtnptr = &gt->gt_gprev->gt_gnext;
1747	    } else {
1748		kernel_table = gt->gt_gnext;
1749		gtnptr = &kernel_table;
1750	    }
1751	    if (gt->gt_gnext)
1752		gt->gt_gnext->gt_gprev = gt->gt_gprev;
1753
1754#ifdef RSRR
1755	    /* Send route change notification to reservation protocol. */
1756	    rsrr_cache_send(gt,0);
1757	    rsrr_cache_clean(gt);
1758#endif /* RSRR */
1759	    free((char *)gt);
1760	} else {
1761	    if (gt->gt_prsent_timer == -1) {
1762		if (gt->gt_grpmems == 0)
1763		    send_prune(gt);
1764		else
1765		    gt->gt_prsent_timer = 0;
1766	    }
1767	    gtnptr = &gt->gt_gnext;
1768	}
1769    }
1770
1771    /*
1772     * When traversing the no_route table, the decision is much easier.
1773     * Just delete it if it has timed out.
1774     */
1775    gtnptr = &kernel_no_route;
1776    while ((gt = *gtnptr) != NULL) {
1777	/* advance the timer for the kernel entry */
1778	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1779
1780	if (gt->gt_timer < 0) {
1781	    if (gt->gt_srctbl) {
1782		if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1783		    logit(LOG_WARNING, errno, "%s (%s %s)",
1784			"age_table_entry trying to delete no-route",
1785			inet_fmt(gt->gt_srctbl->st_origin),
1786			inet_fmt(gt->gt_mcastgrp));
1787		}
1788		free(gt->gt_srctbl);
1789	    }
1790	    *gtnptr = gt->gt_next;
1791	    if (gt->gt_next)
1792		gt->gt_next->gt_prev = gt->gt_prev;
1793
1794	    free((char *)gt);
1795	} else {
1796	    gtnptr = &gt->gt_next;
1797	}
1798    }
1799}
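/*
 * Illustrative note (the interval value is an assumption, not taken
 * from this file): the timers above count seconds and are decremented
 * by ROUTE_MAX_REPORT_DELAY on every call, and age_table_entry() is
 * driven from the periodic timer at that same interval; assuming the
 * usual value of 5 seconds, an entry whose dithered lifetime came out
 * at 450 seconds is examined for deletion roughly 90 passes later.
 */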
1800
1801/*
1802 * Modify the kernel to forward packets when one or multiple prunes that
1803 * were received on the vif given by vifi, for the group given by gt,
1804 * have expired.
1805 */
1806static void
1807expire_prune(vifi_t vifi, struct gtable *gt)
1808{
1809    /*
1810     * No need to send a graft, any prunes that we sent
1811     * will expire before any prunes that we have received.
1812     */
1813    if (gt->gt_prsent_timer > 0) {
1814        logit(LOG_DEBUG, 0, "prune expired with %d left on %s",
1815		gt->gt_prsent_timer, "prsent_timer");
1816        gt->gt_prsent_timer = 0;
1817    }
1818
1819    /* modify the kernel entry to forward packets */
1820    if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1821        struct rtentry *rt = gt->gt_route;
1822        VIFM_SET(vifi, gt->gt_grpmems);
1823        logit(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1824	inet_fmts(rt->rt_origin, rt->rt_originmask),
1825	inet_fmt(gt->gt_mcastgrp), gt->gt_grpmems, vifi);
1826
1827        prun_add_ttls(gt);
1828        update_kernel(gt);
1829#ifdef RSRR
1830        /* Send route change notification to reservation protocol. */
1831        rsrr_cache_send(gt,1);
1832#endif /* RSRR */
1833    }
1834}
1835
1836
1837static const char *
1838scaletime(u_long t)
1839{
1840    static char buf1[5];
1841    static char buf2[5];
1842    static char *buf=buf1;
1843    char s;
1844    char *p;
1845
1846    p = buf;
1847    if (buf == buf1)
1848	buf = buf2;
1849    else
1850	buf = buf1;
1851
1852    if (t < 120) {
1853	s = 's';
1854    } else if (t < 3600) {
1855	t /= 60;
1856	s = 'm';
1857    } else if (t < 86400) {
1858	t /= 3600;
1859	s = 'h';
1860    } else if (t < 864000) {
1861	t /= 86400;
1862	s = 'd';
1863    } else {
1864	t /= 604800;
1865	s = 'w';
1866    }
1867    if (t > 999)
1868	return "*** ";
1869
1870    snprintf(p, 5, "%3d%c", (int)t, s);
1871
1872    return p;
1873}
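/*
 * Illustrative examples (not part of the original code):
 *
 *	scaletime(45)    returns " 45s"
 *	scaletime(150)   returns "  2m"	(150 / 60, truncated)
 *	scaletime(7200)  returns "  2h"
 *
 * Two static buffers are rotated so that dump_cache() can safely use
 * two scaletime() results in a single fprintf() call.
 */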
1874
1875/*
1876 * Print the contents of the cache table on file 'fp2'.
1877 */
1878void
1879dump_cache(FILE *fp2)
1880{
    struct rtentry *r;
    struct gtable *gt;
    struct stable *st;
#ifdef DEBUG_PRUNES
    struct ptable *pt;
#endif
    vifi_t i;
    time_t thyme = time(0);
1886
1887    fprintf(fp2,
1888	    "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1889    " Origin             Mcast-group     CTmr  Age Ptmr IVif Forwvifs\n");
1890
1891    for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1892	if (gt->gt_srctbl) {
1893	    fprintf(fp2, " %-18s %-15s %-4s %-4s    - -1\n",
1894		inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff),
1895		inet_fmt(gt->gt_mcastgrp), scaletime(gt->gt_timer),
1896		scaletime(thyme - gt->gt_ctime));
1897	    fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin));
1898	}
1899    }
1900
1901    for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1902	r = gt->gt_route;
1903	fprintf(fp2, " %-18s %-15s",
1904	    inet_fmts(r->rt_origin, r->rt_originmask),
1905	    inet_fmt(gt->gt_mcastgrp));
1906
1907	fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1908
1909	fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1910			gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1911					      "   -");
1912
1913	fprintf(fp2, "%2u%c%c ", r->rt_parent,
1914	    gt->gt_prsent_timer ? 'P' : ' ',
1915	    VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1916
1917	for (i = 0; i < numvifs; ++i) {
1918	    if (VIFM_ISSET(i, gt->gt_grpmems))
1919		fprintf(fp2, " %u ", i);
1920	    else if (VIFM_ISSET(i, r->rt_children) &&
1921		     !VIFM_ISSET(i, r->rt_leaves))
1922		fprintf(fp2, " %u%c", i,
1923			VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1924	}
1925	fprintf(fp2, "\n");
1926	for (st = gt->gt_srctbl; st; st = st->st_next) {
1927	    fprintf(fp2, ">%s\n", inet_fmt(st->st_origin));
1928	}
1929#ifdef DEBUG_PRUNES
1930	for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1931	    fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router),
1932		pt->pt_vifi, pt->pt_timer);
1933	}
1934#endif
1935    }
1936}
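/*
 * Illustrative sample of the table printed above (the addresses and
 * timer values are made up):
 *
 *  Origin             Mcast-group     CTmr  Age Ptmr IVif Forwvifs
 *  10.1/16            224.2.2.2        5m   2h    -   0    1  2p
 * >10.1.1.1
 *
 * In the Forwvifs column a bare number is a vif with members, a
 * trailing 'p' marks a child vif the entry is not currently being
 * forwarded on (typically pruned downstream), and 'b' marks a vif
 * where the group is administratively scoped; each '>' line lists
 * one source covered by the entry.
 */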
1937
1938/*
1939 * Traceroute function which returns traceroute replies to the requesting
1940 * router. Also forwards the request to downstream routers.
1941 *
1942 * no: promoted u_char
1943 */
1944void
1945accept_mtrace(u_int32_t src, u_int32_t dst, u_int32_t group, char *data,
1946	      u_int no, int datalen)
1947{
1948    u_char type;
1949    struct rtentry *rt;
1950    struct gtable *gt;
1951    struct tr_query *qry;
1952    struct tr_resp  *resp;
1953    int vifi;
1954    char *p;
1955    int rcount;
1956    int errcode = TR_NO_ERR;
1957    int resptype;
1958    struct timeval tp;
1959    struct sioc_vif_req v_req;
1960    struct sioc_sg_req sg_req;
1961
1962    /* Remember qid across invocations */
1963    static u_int32_t oqid = 0;
1964
1965    /* timestamp the request/response */
1966    gettimeofday(&tp, 0);
1967
1968    /*
1969     * Check if it is a query or a response
1970     */
1971    if (datalen == QLEN) {
1972	type = QUERY;
1973	logit(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
1974	    inet_fmt(src), inet_fmt(dst));
1975    }
1976    else if ((datalen - QLEN) % RLEN == 0) {
1977	type = RESP;
1978	logit(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
1979	    inet_fmt(src), inet_fmt(dst));
1980	if (IN_MULTICAST(ntohl(dst))) {
1981	    logit(LOG_DEBUG, 0, "Dropping multicast response");
1982	    return;
1983	}
1984    }
1985    else {
1986	logit(LOG_WARNING, 0, "%s from %s to %s",
1987	    "Non decipherable traceroute request received",
1988	    inet_fmt(src), inet_fmt(dst));
1989	return;
1990    }
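    /*
     * Illustrative note (not part of the original code): the length
     * tests above are plain arithmetic on the fixed-size pieces of a
     * traceroute packet.  A fresh query is exactly QLEN bytes, and
     * every router that has already handled it appends one RLEN-byte
     * response block, so a packet of QLEN + 2*RLEN bytes is an
     * in-transit request that has passed through two hops.
     */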
1991
1992    qry = (struct tr_query *)data;
1993
1994    /*
1995     * if it is a packet with all reports filled, drop it
1996     */
1997    if ((u_int)(rcount = (datalen - QLEN)/RLEN) == no) {
1998	logit(LOG_DEBUG, 0, "packet with all reports filled in");
1999	return;
2000    }
2001
2002    logit(LOG_DEBUG, 0, "s: %s g: %s d: %s ",
2003	    inet_fmt(qry->tr_src),
2004	    inet_fmt(group),
2005	    inet_fmt(qry->tr_dst));
2006    logit(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
2007	    inet_fmt(qry->tr_raddr));
2008    logit(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);
2009
2010    /* determine the routing table entry for this traceroute */
2011    rt = determine_route(qry->tr_src);
2012    if (rt) {
2013	logit(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
2014		rt->rt_parent, inet_fmt(rt->rt_gateway),
2015		rt->rt_metric);
2016	logit(LOG_DEBUG, 0, "rt origin %s",
2017		inet_fmts(rt->rt_origin, rt->rt_originmask));
2018    } else
2019	logit(LOG_DEBUG, 0, "...no route");
2020
2021    /*
2022     * Query type packet - check whether a route entry exists for the source,
2023     * whether the query destination is on a vif connected to me,
2024     * and if so, whether I should start the response back.
2025     */
2026    if (type == QUERY) {
2027	if (oqid == qry->tr_qid) {
2028	    /*
2029	     * If the multicast router is a member of the group being
2030	     * queried, and the query is multicasted, then the router can
2031	     * receive multiple copies of the same query.  If we have already
2032	     * replied to this traceroute, just ignore it this time.
2033	     *
2034	     * This is not a complete solution, but if this check fails you
2035	     * only get N copies, N <= the number of interfaces on the router,
2036	     * so it is not fatal.
2037	     */
2038	    logit(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
2039	    return;
2040	}
2041
2042	if (rt == NULL) {
2043	    logit(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
2044		   inet_fmt(qry->tr_src));
2045	    if (IN_MULTICAST(ntohl(dst)))
2046		return;
2047	}
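	/* Find the vif whose subnet contains the trace destination. */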
2048	vifi = find_vif(qry->tr_dst, 0);
2049
2050	if (vifi == NO_VIF) {
2051	    /* The traceroute destination is not on one of my subnet vifs. */
2052	    logit(LOG_DEBUG, 0, "Destination %s not an interface",
2053		   inet_fmt(qry->tr_dst));
2054	    if (IN_MULTICAST(ntohl(dst)))
2055		return;
2056	    errcode = TR_WRONG_IF;
2057	} else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
2058	    logit(LOG_DEBUG, 0,
2059		    "Destination %s not on forwarding tree for src %s",
2060		   inet_fmt(qry->tr_dst),
2061		   inet_fmt(qry->tr_src));
2062	    if (IN_MULTICAST(ntohl(dst)))
2063		return;
2064	    errcode = TR_WRONG_IF;
2065	}
2066    }
2067    else {
2068	/*
2069	 * determine which interface the packet came in on
2070	 * RESP packets travel hop-by-hop so this either traversed
2071	 * a tunnel or came from a directly attached mrouter.
2072	 */
2073	if ((vifi = find_vif(src, dst)) == NO_VIF) {
2074	    logit(LOG_DEBUG, 0, "Wrong interface for packet");
2075	    errcode = TR_WRONG_IF;
2076	}
2077    }
2078
2079    /* Now that we've decided to send a response, save the qid */
2080    oqid = qry->tr_qid;
2081
2082    logit(LOG_DEBUG, 0, "Sending traceroute response");
2083
2084    /* copy the packet to the sending buffer */
2085    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
2086
2087    bcopy(data, p, datalen);
2088
2089    p += datalen;
2090
2091    /*
2092     * If there is no room to insert our reply, co-opt the previous hop's
2093     * error indication to relay this fact.
2094     */
2095    if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
2096	resp = (struct tr_resp *)p - 1;
2097	resp->tr_rflags = TR_NO_SPACE;
2098	rt = NULL;
2099	goto sendit;
2100    }
2101
2102    /*
2103     * fill in initial response fields
2104     */
2105    resp = (struct tr_resp *)p;
2106    bzero(resp, sizeof(struct tr_resp));
2107    datalen += RLEN;
2108
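    /*
     * Query arrival time in NTP-like format: seconds since 1900 in the
     * upper 16 bits (JAN_1970 is the 1900-to-1970 offset) and a cheap
     * approximation of the 16-bit fraction (usec >> 4) in the lower bits.
     */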
2109    resp->tr_qarr    = htonl(((tp.tv_sec + JAN_1970) << 16) +
2110				((tp.tv_usec >> 4) & 0xffff));
2111
2112    resp->tr_rproto  = PROTO_DVMRP;
2113    if (errcode != TR_NO_ERR) {
2114	resp->tr_rflags	 = errcode;
2115	rt = NULL;	/* hack: force the reply straight to the requestor */
2116	goto sendit;
2117    }
2118    resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
2119    resp->tr_fttl    = uvifs[vifi].uv_threshold;
2120    resp->tr_rflags  = TR_NO_ERR;
2121
2122    /*
2123     * obtain # of packets out on interface
2124     */
2125    v_req.vifi = vifi;
2126    if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2127	resp->tr_vifout  =  htonl(v_req.ocount);
2128
2129    /*
2130     * fill in scoping & pruning information
2131     */
2132    if (rt)
2133	for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
2134	    if (gt->gt_mcastgrp >= group)
2135		break;
2136	}
2137    else
2138	gt = NULL;
2139
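    /*
     * The lookup above relies on the per-route group list being ordered by
     * group address: it stops at the first entry >= the traced group, which
     * is a match only if the addresses are exactly equal.
     */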
2140    if (gt && gt->gt_mcastgrp == group) {
2141	sg_req.src.s_addr = qry->tr_src;
2142	sg_req.grp.s_addr = group;
2143	if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
2144	    resp->tr_pktcnt = htonl(sg_req.pktcnt);
2145
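	/*
	 * Record why traffic for this (source, group) is or is not forwarded
	 * on the response vif: scoped on this vif, pruned upstream by us,
	 * pruned by the downstream neighbors, or simply not on the tree.
	 */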
2146	if (VIFM_ISSET(vifi, gt->gt_scope))
2147	    resp->tr_rflags = TR_SCOPED;
2148	else if (gt->gt_prsent_timer)
2149	    resp->tr_rflags = TR_PRUNED;
2150	else if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
2151	    if (VIFM_ISSET(vifi, rt->rt_children) &&
2152		!VIFM_ISSET(vifi, rt->rt_leaves))
2153		resp->tr_rflags = TR_OPRUNED;
2154	    else
2155		resp->tr_rflags = TR_NO_FWD;
2156	}
2157    } else {
2158	if (scoped_addr(vifi, group))
2159	    resp->tr_rflags = TR_SCOPED;
2160	else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
2161	    resp->tr_rflags = TR_NO_FWD;
2162    }
2163
2164    /*
2165     *  if no rte exists, set NO_RTE error
2166     */
2167    if (rt == NULL) {
2168	src = dst;		/* source the response from the query's dst addr */
2169	resp->tr_inaddr   = 0;
2170	resp->tr_rflags   = TR_NO_RTE;
2171	resp->tr_rmtaddr  = 0;
2172    } else {
2173	/* get # of packets in on interface */
2174	v_req.vifi = rt->rt_parent;
2175	if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
2176	    resp->tr_vifin = htonl(v_req.icount);
2177
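	/*
	 * Fill in the upstream side of the response: the route's source
	 * netmask, our address on the incoming vif (also used as the source
	 * address of the response), and the upstream router's address.
	 */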
2178	MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
2179	src = uvifs[rt->rt_parent].uv_lcl_addr;
2180	resp->tr_inaddr = src;
2181	resp->tr_rmtaddr = rt->rt_gateway;
2182	if (!VIFM_ISSET(vifi, rt->rt_children)) {
2183	    logit(LOG_DEBUG, 0,
2184		    "Destination %s not on forwarding tree for src %s",
2185		   inet_fmt(qry->tr_dst),
2186		   inet_fmt(qry->tr_src));
2187	    resp->tr_rflags = TR_WRONG_IF;
2188	}
2189	if (rt->rt_metric >= UNREACHABLE) {
2190	    resp->tr_rflags = TR_NO_RTE;
2191	    /* Hack to send reply directly */
2192	    rt = NULL;
2193	}
2194    }
2195
2196sendit:
2197    /*
2198     * If the metric is 1, no route exists, or the reports are full, send
2199     * the response to the requestor; else send to the upstream router.
2200     * If it can't handle mtrace, set an error code and reply to the requestor.
2201     */
2202    logit(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);
2203
2204    if (((u_int)(rcount + 1) == no) || (rt == NULL) || (rt->rt_metric == 1)) {
2205	resptype = IGMP_MTRACE_REPLY;
2206	dst = qry->tr_raddr;
2207    } else
2208	if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
2209	    dst = qry->tr_raddr;
2210	    resp->tr_rflags = TR_OLD_ROUTER;
2211	    resptype = IGMP_MTRACE_REPLY;
2212	} else {
2213	    dst = rt->rt_gateway;
2214	    resptype = IGMP_MTRACE_QUERY;
2215	}
2216
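    /*
     * Deliver the packet.  A multicast response address must go out a
     * multicast-capable physical vif with the TTL the querier asked for;
     * a unicast address is sent directly.
     */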
2217    if (IN_MULTICAST(ntohl(dst))) {
2218	/*
2219	 * Send the reply on a known multicast capable vif.
2220	 * If we don't have one, we can't source any multicasts anyway.
2221	 */
2222	if (phys_vif != -1) {
2223	    logit(LOG_DEBUG, 0, "Sending reply to %s from %s",
2224		inet_fmt(dst), inet_fmt(uvifs[phys_vif].uv_lcl_addr));
2225	    k_set_ttl(qry->tr_rttl);
2226	    send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
2227		      resptype, no, group,
2228		      datalen);
2229	    k_set_ttl(1);
2230	} else
2231	    logit(LOG_INFO, 0, "No enabled phyints -- %s",
2232			"dropping traceroute reply");
2233    } else {
2234	logit(LOG_DEBUG, 0, "Sending %s to %s from %s",
2235	    resptype == IGMP_MTRACE_REPLY ?  "reply" : "request on",
2236	    inet_fmt(dst), inet_fmt(src));
2237
2238	send_igmp(src, dst,
2239		  resptype, no, group,
2240		  datalen);
2241    }
2242    return;
2243}
2244