prune.c revision 1.11
1/*	$NetBSD: prune.c,v 1.3 1995/12/10 10:07:09 mycroft Exp $	*/
2
3/*
4 * The mrouted program is covered by the license in the accompanying file
5 * named "LICENSE".  Use of the mrouted program represents acceptance of
6 * the terms and conditions listed in that file.
7 *
8 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
9 * Leland Stanford Junior University.
10 */
11
12
13#include "defs.h"
14
15extern int cache_lifetime;
16extern int max_prune_lifetime;
17extern struct rtentry *routing_table;
18
19extern int phys_vif;
20
21/*
22 * dither cache lifetime to obtain a value between x and 2*x
23 */
24#ifdef SYSV
25#define CACHE_LIFETIME(x) ((x) + (lrand48() % (x)))
26#else
27#define CACHE_LIFETIME(x) ((x) + (random() % (x)))
28#endif
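
/*
 * For example, if cache_lifetime is 300 seconds, CACHE_LIFETIME(300)
 * yields a value in the range [300, 599], spreading entry expirations
 * so that they do not all fall in the same ageing pass.
 */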
29
30#define CHK_GS(x, y) {	\
31		switch(x) { \
32			case 2:	\
33			case 4:	\
34			case 8:	\
35			case 16: \
36			case 32: \
37			case 64: \
38			case 128: \
39			case 256: y = 1; \
40				  break; \
41			default:  y = 0; \
42		} \
43	}
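
/*
 * CHK_GS(x, y) sets y to 1 only when x is a power of two between 2 and
 * 256.  age_table_entry() feeds the incrementing gt_grftsnt counter
 * through it, so an unacknowledged graft is retransmitted on the 2nd,
 * 4th, 8th, ... ageing pass after the original transmission -- a simple
 * exponential backoff.
 */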
44
45struct gtable *kernel_table;		/* ptr to list of kernel grp entries*/
46static struct gtable *kernel_no_route;	/* list of grp entries w/o routes   */
47struct gtable *gtp;			/* pointer for kernel rt entries    */
48unsigned int kroutes;			/* current number of cache entries  */
49
50/****************************************************************************
51                       Functions that are local to prune.c
52****************************************************************************/
53static void		prun_add_ttls(struct gtable *gt);
54static int		pruning_neighbor(vifi_t vifi, u_int32_t addr);
55static int		can_mtrace(vifi_t vifi, u_int32_t addr);
56static struct ptable *	find_prune_entry(u_int32_t vr, struct ptable *pt);
57static void		expire_prune(vifi_t vifi, struct gtable *gt);
58static void		send_prune(struct gtable *gt);
59static void		send_graft(struct gtable *gt);
60static void		send_graft_ack(u_int32_t src, u_int32_t dst,
61			    u_int32_t origin, u_int32_t grp);
62static void		update_kernel(struct gtable *g);
63static char *		scaletime(u_long t);
64
65/*
66 * Updates the ttl values for each vif.
67 */
68static void
69prun_add_ttls(struct gtable *gt)
70{
71    struct uvif *v;
72    vifi_t vifi;
73
74    for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
75	if (VIFM_ISSET(vifi, gt->gt_grpmems))
76	    gt->gt_ttls[vifi] = v->uv_threshold;
77	else
78	    gt->gt_ttls[vifi] = 0;
79    }
80}
81
82/*
83 * checks for scoped multicast addresses
84 */
85#define GET_SCOPE(gt) { \
86	register vifi_t _i; \
87	if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
88	    for (_i = 0; _i < numvifs; _i++) \
89		if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
90		    VIFM_SET(_i, (gt)->gt_scope); \
91	}
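
/*
 * GET_SCOPE only considers administratively scoped groups (239.0.0.0/8,
 * i.e. a first octet of 0xef).  For such a group it marks every vif whose
 * boundary access list covers the group; those vifs are later masked out
 * of gt_grpmems wherever gt_scope is applied.
 */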
92
93int
94scoped_addr(vifi_t vifi, u_int32_t addr)
95{
96    struct vif_acl *acl;
97
98    for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
99	if ((addr & acl->acl_mask) == acl->acl_addr)
100	    return 1;
101
102    return 0;
103}
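
/*
 * scoped_addr() returns 1 when any boundary ACL on the vif covers the
 * group.  For example, an acl_addr/acl_mask pair describing 239.2.0.0/16
 * matches 239.2.1.5, since (addr & mask) equals acl_addr.  (The prefix
 * here is purely illustrative.)
 */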
104
105/*
106 * Determine if mcastgrp has a listener on vifi
107 */
108int
109grplst_mem(vifi_t vifi, u_int32_t mcastgrp)
110{
111    register struct listaddr *g;
112    register struct uvif *v;
113
114    v = &uvifs[vifi];
115
116    for (g = v->uv_groups; g != NULL; g = g->al_next)
117	if (mcastgrp == g->al_addr)
118	    return 1;
119
120    return 0;
121}
122
123/*
124 * Finds the group entry with the specified source and netmask.
125 * If netmask is 0, the route's netmask is used.
126 *
127 * Returns TRUE if a match is found, with the global variable gtp left
128 * pointing to the entry just before the found entry.
129 * Returns FALSE if no exact match is found; gtp is left pointing to the
130 * entry before the place where the entry in question belongs, or is NULL
131 * if it belongs at the head of the list.
132 */
133int
134find_src_grp(u_int32_t src, u_int32_t mask, u_int32_t grp)
135{
136    struct gtable *gt;
137
138    gtp = NULL;
139    gt = kernel_table;
140    while (gt != NULL) {
141	if (grp == gt->gt_mcastgrp &&
142	    (mask ? (gt->gt_route->rt_origin == src &&
143		     gt->gt_route->rt_originmask == mask) :
144		    ((src & gt->gt_route->rt_originmask) ==
145		     gt->gt_route->rt_origin)))
146	    return TRUE;
147	if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
148	    (grp == gt->gt_mcastgrp &&
149	     (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
150	      (mask == gt->gt_route->rt_originmask &&
151	       (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
152	    gtp = gt;
153	    gt = gt->gt_gnext;
154	}
155	else break;
156    }
157    return FALSE;
158}
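
/*
 * The search above relies on the ordering of kernel_table: entries are
 * sorted by group address (ascending, compared in host byte order), then
 * by origin mask (larger, more specific masks first), then by origin
 * (ascending).  add_table_entry() uses gtp to splice new entries in at
 * the right place, which preserves this invariant.
 */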
159
160/*
161 * Check if the neighbor supports pruning
162 */
163static int
164pruning_neighbor(vifi_t vifi, u_int32_t addr)
165{
166    struct listaddr *n = neighbor_info(vifi, addr);
167    int vers;
168
169    if (n == NULL)
170	return 0;
171
172    if (n->al_flags & NF_PRUNE)
173	return 1;
174
175    /*
176     * Versions from 3.0 to 3.4 relied on the version number to identify
177     * that they could handle pruning.
178     */
179    vers = NBR_VERS(n);
180    return (vers >= 0x0300 && vers <= 0x0304);
181}
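
/*
 * NBR_VERS() packs the neighbor's reported version as (major << 8) | minor,
 * so the 0x0300 to 0x0304 range above corresponds to mrouted 3.0 through
 * 3.4.  Newer implementations advertise pruning and mtrace support
 * explicitly through the NF_PRUNE and NF_MTRACE flag bits instead.
 */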
182
183/*
184 * Can the neighbor in question handle multicast traceroute?
185 */
186static int
187can_mtrace(vifi_t vifi, u_int32_t addr)
188{
189    struct listaddr *n = neighbor_info(vifi, addr);
190    int vers;
191
192    if (n == NULL)
193	return 0;
194
195    if (n->al_flags & NF_MTRACE)
196	return 1;
197
198    /*
199     * Versions 3.3 and 3.4 relied on the version number to identify
200     * that they could handle traceroute.
201     */
202    vers = NBR_VERS(n);
203    return (vers >= 0x0303 && vers <= 0x0304);
204}
205
206/*
207 * Returns the prune entry of the router, or NULL if none exists
208 */
209static struct ptable *
210find_prune_entry(u_int32_t vr, struct ptable *pt)
211{
212    while (pt) {
213	if (pt->pt_router == vr)
214	    return pt;
215	pt = pt->pt_next;
216    }
217
218    return NULL;
219}
220
221/*
222 * Send a prune message to the dominant router for
223 * this source.
224 *
225 * Record an entry that a prune was sent for this group
226 */
227static void
228send_prune(struct gtable *gt)
229{
230    struct ptable *pt;
231    char *p;
232    int i;
233    int datalen;
234    u_int32_t src;
235    u_int32_t dst;
236    u_int32_t tmp;
237
238    /* Don't send a prune if this router is not pruning */
239    if (pruning == 0)
240	return;
241
242    /* Can't send a prune if we don't have an associated route */
243    if (gt->gt_route == NULL)
244	return;
245
246    /* Don't send a prune to a non-pruning router */
247    if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))
248	return;
249
250    /*
251     * sends a prune message to the router upstream.
252     */
253    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
254    dst = gt->gt_route->rt_gateway;
255
256    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
257    datalen = 0;
258
259    /*
260     * determine prune lifetime
261     */
262    gt->gt_prsent_timer = gt->gt_timer;
263    for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
264	if (pt->pt_timer < gt->gt_prsent_timer)
265	    gt->gt_prsent_timer = pt->pt_timer;
266
267    /*
268     * If we have a graft pending, cancel graft retransmission
269     */
270    gt->gt_grftsnt = 0;
271
272    for (i = 0; i < 4; i++)
273	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
274    for (i = 0; i < 4; i++)
275	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
276    tmp = htonl(gt->gt_prsent_timer);
277    for (i = 0; i < 4; i++)
278	*p++ = ((char *)&(tmp))[i];
279    datalen += 12;
280
281    send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
282	      htonl(MROUTED_LEVEL), datalen);
283
284    logit(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
285      inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
286      inet_fmt(gt->gt_mcastgrp, s2),
287      gt->gt_prsent_timer, gt->gt_route->rt_parent,
288      inet_fmt(gt->gt_route->rt_gateway, s3));
289}
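
/*
 * The DVMRP prune body built above is 12 bytes:
 *
 *	+0	route origin		(4 bytes)
 *	+4	multicast group		(4 bytes)
 *	+8	prune lifetime		(4 bytes, seconds, network byte order)
 *
 * The lifetime sent is the minimum of our own cache timer and all of the
 * prunes we hold from downstream routers, so an upstream prune never
 * outlives the state that justified it.
 */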
290
291/*
292 * A prune was sent upstream, so a graft has to be sent to annul
293 * the prune.  The caller sets the graft retransmission flag
294 * (gt_grftsnt); if an ack is not heard in time,
295 * age_table_entry() retransmits the graft request
296 * until one arrives.
297 */
298static void
299send_graft(struct gtable *gt)
300{
301    register char *p;
302    register int i;
303    int datalen;
304    u_int32_t src;
305    u_int32_t dst;
306
307    /* Can't send a graft without an associated route */
308    if (gt->gt_route == NULL)
309	return;
310
311    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
312    dst = gt->gt_route->rt_gateway;
313
314    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
315    datalen = 0;
316
317    for (i = 0; i < 4; i++)
318	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
319    for (i = 0; i < 4; i++)
320	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
321    datalen += 8;
322
323    if (datalen != 0) {
324	send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
325		  htonl(MROUTED_LEVEL), datalen);
326    }
327    logit(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
328	inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask, s1),
329	inet_fmt(gt->gt_mcastgrp, s2),
330	inet_fmt(gt->gt_route->rt_gateway, s3), gt->gt_route->rt_parent);
331}
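
/*
 * A graft carries only the 8-byte (origin, group) pair.  The caller is
 * responsible for setting gt_grftsnt; age_table_entry() then retransmits
 * the graft with exponential backoff (see CHK_GS above) until a matching
 * graft ack clears gt_grftsnt in accept_g_ack().
 */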
332
333/*
334 * Send an ack that a graft was received
335 */
336static void
337send_graft_ack(u_int32_t src, u_int32_t dst, u_int32_t origin, u_int32_t grp)
338{
339    register char *p;
340    register int i;
341    int datalen;
342
343    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
344    datalen = 0;
345
346    for (i = 0; i < 4; i++)
347	*p++ = ((char *)&(origin))[i];
348    for (i = 0; i < 4; i++)
349	*p++ = ((char *)&(grp))[i];
350    datalen += 8;
351
352    send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
353	      htonl(MROUTED_LEVEL), datalen);
354
355    logit(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
356	inet_fmt(origin, s1), inet_fmt(grp, s2), inet_fmt(dst, s3));
357}
358
359/*
360 * Update the kernel cache with all the routes hanging off the group entry
361 */
362static void
363update_kernel(struct gtable *g)
364{
365    struct stable *st;
366
367    for (st = g->gt_srctbl; st; st = st->st_next)
368	k_add_rg(st->st_origin, g);
369}
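
/*
 * The kernel keeps one (source, group) cache entry per individual source,
 * so update_kernel() walks the source table and reinstalls each entry
 * with the group's current vif membership and TTLs.
 */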
370
371/****************************************************************************
372                          Functions that are used externally
373****************************************************************************/
374
375/*
376 * Initialize the kernel table structure
377 */
378void
379init_ktable(void)
380{
381    kernel_table	= NULL;
382    kernel_no_route	= NULL;
383    kroutes		= 0;
384}
385
386/*
387 * Add a new table entry for (origin, mcastgrp)
388 */
389void
390add_table_entry(u_int32_t origin, u_int32_t mcastgrp)
391{
392    struct rtentry *r;
393    struct gtable *gt,**gtnp,*prev_gt;
394    struct stable *st,**stnp;
395    vifi_t i;
396
397#ifdef DEBUG_MFC
398    md_logit(MD_MISS, origin, mcastgrp);
399#endif
400
401    r = determine_route(origin);
402    prev_gt = NULL;
403    if (r == NULL) {
404	/*
405	 * Look for it on the no_route table; if it is found then
406	 * it will be detected as a duplicate below.
407	 */
408	for (gt = kernel_no_route; gt; gt = gt->gt_next)
409	    if (mcastgrp == gt->gt_mcastgrp &&
410		gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)
411			break;
412	gtnp = &kernel_no_route;
413    } else {
414	gtnp = &r->rt_groups;
415	while ((gt = *gtnp) != NULL) {
416	    if (gt->gt_mcastgrp >= mcastgrp)
417		break;
418	    gtnp = &gt->gt_next;
419	    prev_gt = gt;
420	}
421    }
422
423    if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
424	gt = (struct gtable *)malloc(sizeof(struct gtable));
425	if (gt == NULL)
426	    logit(LOG_ERR, 0, "ran out of memory");
427
428	gt->gt_mcastgrp	    = mcastgrp;
429	gt->gt_timer	    = CACHE_LIFETIME(cache_lifetime);
430	time(&gt->gt_ctime);
431	gt->gt_grpmems	    = 0;
432	gt->gt_scope	    = 0;
433	gt->gt_prsent_timer = 0;
434	gt->gt_grftsnt	    = 0;
435	gt->gt_srctbl	    = NULL;
436	gt->gt_pruntbl	    = NULL;
437	gt->gt_route	    = r;
438#ifdef RSRR
439	gt->gt_rsrr_cache   = NULL;
440#endif
441
442	if (r != NULL) {
443	    /* obtain the multicast group membership list */
444	    for (i = 0; i < numvifs; i++) {
445		if (VIFM_ISSET(i, r->rt_children) &&
446		    !(VIFM_ISSET(i, r->rt_leaves)))
447		    VIFM_SET(i, gt->gt_grpmems);
448
449		if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
450		    VIFM_SET(i, gt->gt_grpmems);
451	    }
452	    GET_SCOPE(gt);
453	    if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
454		gt->gt_scope = -1;
455	    gt->gt_grpmems &= ~gt->gt_scope;
456	} else {
457	    gt->gt_scope = -1;
458	    gt->gt_grpmems = 0;
459	}
460
461	/* update ttls */
462	prun_add_ttls(gt);
463
464	gt->gt_next = *gtnp;
465	*gtnp = gt;
466	if (gt->gt_next)
467	    gt->gt_next->gt_prev = gt;
468	gt->gt_prev = prev_gt;
469
470	if (r) {
471	    if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {
472		struct gtable *g;
473
474		g = gtp ? gtp->gt_gnext : kernel_table;
475		logit(LOG_WARNING, 0, "Entry for (%s %s) (rt:%x) exists (rt:%x)",
476		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
477		    inet_fmt(g->gt_mcastgrp, s2),
478		    r, g->gt_route);
479	    } else {
480		if (gtp) {
481		    gt->gt_gnext = gtp->gt_gnext;
482		    gt->gt_gprev = gtp;
483		    gtp->gt_gnext = gt;
484		} else {
485		    gt->gt_gnext = kernel_table;
486		    gt->gt_gprev = NULL;
487		    kernel_table = gt;
488		}
489		if (gt->gt_gnext)
490		    gt->gt_gnext->gt_gprev = gt;
491	    }
492	} else {
493	    gt->gt_gnext = gt->gt_gprev = NULL;
494	}
495    }
496
497    stnp = &gt->gt_srctbl;
498    while ((st = *stnp) != NULL) {
499	if (ntohl(st->st_origin) >= ntohl(origin))
500	    break;
501	stnp = &st->st_next;
502    }
503
504    if (st == NULL || st->st_origin != origin) {
505	st = (struct stable *)malloc(sizeof(struct stable));
506	if (st == NULL)
507	    logit(LOG_ERR, 0, "ran out of memory");
508
509	st->st_origin = origin;
510	st->st_pktcnt = 0;
511	st->st_next = *stnp;
512	*stnp = st;
513    } else {
514#ifdef DEBUG_MFC
515	md_logit(MD_DUPE, origin, mcastgrp);
516#endif
517	logit(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",
518		inet_fmt(origin, s1), inet_fmt(mcastgrp, s2));
519	/* XXX Doing this should cause no harm, and may ensure
520	 * kernel<>mrouted synchronization */
521	k_add_rg(origin, gt);
522	return;
523    }
524
525    kroutes++;
526    k_add_rg(origin, gt);
527
528    logit(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
529	inet_fmt(origin, s1),
530	inet_fmt(mcastgrp, s2),
531	gt->gt_grpmems, r ? r->rt_parent : -1);
532
533    /* If this entry ends up with no vifs to forward on (gt_grpmems is
534     * empty) and the route has an upstream gateway, it is a prune
535     * candidate; send the prune upstream right away.
536     */
537    if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
538	send_prune(gt);
539}
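
/*
 * The resulting entry is linked two ways: onto its route's rt_groups list
 * (gt_next/gt_prev, sorted by group) and, when a route exists, onto the
 * global kernel_table list (gt_gnext/gt_gprev, in find_src_grp() order).
 * Each group entry in turn carries an stable list of the individual
 * sources actually seen (sorted by origin) and a ptable list of the
 * prunes received from downstream routers.
 */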
540
541/*
542 * An mrouter has gone down and come up on an interface
543 * Forward on that interface immediately
544 */
545void
546reset_neighbor_state(vifi_t vifi, u_int32_t addr)
547{
548    struct rtentry *r;
549    struct gtable *g;
550    struct ptable *pt, **ptnp;
551    struct stable *st;
552
553    for (g = kernel_table; g; g = g->gt_gnext) {
554	r = g->gt_route;
555
556	/*
557	 * If neighbor was the parent, remove the prune sent state
558	 * and all of the source cache info so that prunes get
559	 * regenerated.
560	 */
561	if (vifi == r->rt_parent) {
562	    if (addr == r->rt_gateway) {
563		logit(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
564		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
565		    inet_fmt(g->gt_mcastgrp, s2));
566
567		g->gt_prsent_timer = 0;
568		g->gt_grftsnt = 0;
569		while ((st = g->gt_srctbl) != NULL) {
570		    g->gt_srctbl = st->st_next;
571		    k_del_rg(st->st_origin, g);
572		    kroutes--;
573		    free(st);
574		}
575	    }
576	} else {
577	    /*
578	     * Neighbor was not the parent, send grafts to join the groups
579	     */
580	    if (g->gt_prsent_timer) {
581		g->gt_grftsnt = 1;
582		send_graft(g);
583		g->gt_prsent_timer = 0;
584	    }
585
586	    /*
587	     * Remove any prunes that this router has sent us.
588	     */
589	    ptnp = &g->gt_pruntbl;
590	    while ((pt = *ptnp) != NULL) {
591		if (pt->pt_vifi == vifi && pt->pt_router == addr) {
592		    *ptnp = pt->pt_next;
593		    free(pt);
594		} else
595		    ptnp = &pt->pt_next;
596	    }
597
598	    /*
599	     * And see if we want to forward again.
600	     */
601	    if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
602		if (VIFM_ISSET(vifi, r->rt_children) &&
603		    !(VIFM_ISSET(vifi, r->rt_leaves)))
604		    VIFM_SET(vifi, g->gt_grpmems);
605
606		if (VIFM_ISSET(vifi, r->rt_leaves) &&
607		    grplst_mem(vifi, g->gt_mcastgrp))
608		    VIFM_SET(vifi, g->gt_grpmems);
609
610		g->gt_grpmems &= ~g->gt_scope;
611		prun_add_ttls(g);
612
613		/* Update kernel state */
614		update_kernel(g);
615#ifdef RSRR
616		/* Send route change notification to reservation protocol. */
617		rsrr_cache_send(g,1);
618#endif /* RSRR */
619
620		logit(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
621		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
622		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
623	    }
624	}
625    }
626}
627
628/*
629 * Delete table entry from the kernel
630 * del_flag determines how many entries to delete
631 */
632void
633del_table_entry(struct rtentry *r, u_int32_t mcastgrp, u_int del_flag)
634{
635    struct gtable *g, *prev_g;
636    struct stable *st, *prev_st;
637    struct ptable *pt, *prev_pt;
638
639    if (del_flag == DEL_ALL_ROUTES) {
640	g = r->rt_groups;
641	while (g) {
642	    logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
643		inet_fmts(r->rt_origin, r->rt_originmask, s1),
644		inet_fmt(g->gt_mcastgrp, s2));
645	    st = g->gt_srctbl;
646	    while (st) {
647		if (k_del_rg(st->st_origin, g) < 0) {
648		    logit(LOG_WARNING, errno,
649			"del_table_entry trying to delete (%s, %s)",
650			inet_fmt(st->st_origin, s1),
651			inet_fmt(g->gt_mcastgrp, s2));
652		}
653		kroutes--;
654		prev_st = st;
655		st = st->st_next;
656		free(prev_st);
657	    }
658	    g->gt_srctbl = NULL;
659
660	    pt = g->gt_pruntbl;
661	    while (pt) {
662		prev_pt = pt;
663		pt = pt->pt_next;
664		free(prev_pt);
665	    }
666	    g->gt_pruntbl = NULL;
667
668	    if (g->gt_gnext)
669		g->gt_gnext->gt_gprev = g->gt_gprev;
670	    if (g->gt_gprev)
671		g->gt_gprev->gt_gnext = g->gt_gnext;
672	    else
673		kernel_table = g->gt_gnext;
674
675#ifdef RSRR
676	    /* Send route change notification to reservation protocol. */
677	    rsrr_cache_send(g,0);
678	    rsrr_cache_clean(g);
679#endif /* RSRR */
680	    prev_g = g;
681	    g = g->gt_next;
682	    free(prev_g);
683	}
684	r->rt_groups = NULL;
685    }
686
687    /*
688     * Deletes only the given group's entry; kept in case it is someday needed.
689     */
690    if (del_flag == DEL_RTE_GROUP) {
691	prev_g = (struct gtable *)&r->rt_groups;
692	for (g = r->rt_groups; g; g = g->gt_next) {
693	    if (g->gt_mcastgrp == mcastgrp) {
694		logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
695		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
696		    inet_fmt(g->gt_mcastgrp, s2));
697		st = g->gt_srctbl;
698		while (st) {
699		    if (k_del_rg(st->st_origin, g) < 0) {
700			logit(LOG_WARNING, errno,
701			    "del_table_entry trying to delete (%s, %s)",
702			    inet_fmt(st->st_origin, s1),
703			    inet_fmt(g->gt_mcastgrp, s2));
704		    }
705		    kroutes--;
706		    prev_st = st;
707		    st = st->st_next;
708		    free(prev_st);
709		}
710		g->gt_srctbl = NULL;
711
712		pt = g->gt_pruntbl;
713		while (pt) {
714		    prev_pt = pt;
715		    pt = pt->pt_next;
716		    free(prev_pt);
717		}
718		g->gt_pruntbl = NULL;
719
720		if (g->gt_gnext)
721		    g->gt_gnext->gt_gprev = g->gt_gprev;
722		if (g->gt_gprev)
723		    g->gt_gprev->gt_gnext = g->gt_gnext;
724		else
725		    kernel_table = g->gt_gnext;
726
727		if (prev_g != (struct gtable *)&r->rt_groups)
728		    g->gt_next->gt_prev = prev_g;
729		else
730		    g->gt_next->gt_prev = NULL;
731		prev_g->gt_next = g->gt_next;
732
733#ifdef RSRR
734		/* Send route change notification to reservation protocol. */
735		rsrr_cache_send(g,0);
736		rsrr_cache_clean(g);
737#endif /* RSRR */
738		free(g);
739		g = prev_g;
740	    } else {
741		prev_g = g;
742	    }
743	}
744    }
745}
746
747/*
748 * update kernel table entry when a route entry changes
749 */
750void
751update_table_entry(struct rtentry *r)
752{
753    struct gtable *g;
754    struct ptable *pt, *prev_pt;
755    vifi_t i;
756
757    for (g = r->rt_groups; g; g = g->gt_next) {
758	pt = g->gt_pruntbl;
759	while (pt) {
760	    prev_pt = pt->pt_next;
761	    free(pt);
762	    pt = prev_pt;
763	}
764	g->gt_pruntbl = NULL;
765
766	g->gt_grpmems = 0;
767
768	/* obtain the multicast group membership list */
769	for (i = 0; i < numvifs; i++) {
770	    if (VIFM_ISSET(i, r->rt_children) &&
771		!(VIFM_ISSET(i, r->rt_leaves)))
772		VIFM_SET(i, g->gt_grpmems);
773
774	    if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
775		VIFM_SET(i, g->gt_grpmems);
776	}
777	if (VIFM_ISSET(r->rt_parent, g->gt_scope))
778	    g->gt_scope = -1;
779	g->gt_grpmems &= ~g->gt_scope;
780
781	logit(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
782	    inet_fmts(r->rt_origin, r->rt_originmask, s1),
783	    inet_fmt(g->gt_mcastgrp, s2),
784	    g->gt_grpmems);
785
786	if (g->gt_grpmems && g->gt_prsent_timer) {
787	    g->gt_grftsnt = 1;
788	    send_graft(g);
789	    g->gt_prsent_timer = 0;
790	}
791
792	/* update ttls and add entry into kernel */
793	prun_add_ttls(g);
794	update_kernel(g);
795#ifdef RSRR
796	/* Send route change notification to reservation protocol. */
797	rsrr_cache_send(g,1);
798#endif /* RSRR */
799
800	/* Check if we want to prune this group */
801	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
802	    g->gt_timer = CACHE_LIFETIME(cache_lifetime);
803	    send_prune(g);
804	}
805    }
806}
807
808/*
809 * set the forwarding flag for all mcastgrps on this vifi
810 */
811void
812update_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
813{
814    struct rtentry *r;
815    struct gtable *g;
816
817    logit(LOG_DEBUG, 0, "group %s joined on vif %d",
818	inet_fmt(mcastgrp, s1), vifi);
819
820    for (g = kernel_table; g; g = g->gt_gnext) {
821	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
822	    break;
823
824	r = g->gt_route;
825	if (g->gt_mcastgrp == mcastgrp &&
826	    VIFM_ISSET(vifi, r->rt_children)) {
827
828	    VIFM_SET(vifi, g->gt_grpmems);
829	    g->gt_grpmems &= ~g->gt_scope;
830	    if (g->gt_grpmems == 0)
831		continue;
832
833	    prun_add_ttls(g);
834	    logit(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
835		inet_fmts(r->rt_origin, r->rt_originmask, s1),
836		inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
837
838	    update_kernel(g);
839#ifdef RSRR
840	    /* Send route change notification to reservation protocol. */
841	    rsrr_cache_send(g,1);
842#endif /* RSRR */
843	}
844    }
845}
846
847/*
848 * reset forwarding flag for all mcastgrps on this vifi
849 */
850void
851delete_lclgrp(vifi_t vifi, u_int32_t mcastgrp)
852{
853    struct rtentry *r;
854    struct gtable *g;
855
856    logit(LOG_DEBUG, 0, "group %s left on vif %d",
857	inet_fmt(mcastgrp, s1), vifi);
858
859    for (g = kernel_table; g; g = g->gt_gnext) {
860	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
861	    break;
862
863	if (g->gt_mcastgrp == mcastgrp) {
864	    int stop_sending = 1;
865
866	    r = g->gt_route;
867	    /*
868	     * If this is not a leaf, then we have router neighbors on this
869	     * vif.  Only turn off forwarding if they have all pruned.
870	     */
871	    if (!VIFM_ISSET(vifi, r->rt_leaves)) {
872		struct listaddr *vr;
873
874		for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
875		  if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {
876		      stop_sending = 0;
877		      break;
878		  }
879	    }
880
881	    if (stop_sending) {
882		VIFM_CLR(vifi, g->gt_grpmems);
883		logit(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
884		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
885		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
886
887		prun_add_ttls(g);
888		update_kernel(g);
889#ifdef RSRR
890		/* Send route change notification to reservation protocol. */
891		rsrr_cache_send(g,1);
892#endif /* RSRR */
893
894		/*
895		 * If there are no more members of this particular group,
896		 *  send prune upstream
897		 */
898		if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
899		    send_prune(g);
900	    }
901	}
902    }
903}
904
905/*
906 * Takes the received prune message and parses it to
907 * determine the (src, grp) pair to be pruned.
908 *
909 * Then records the pruning router in that (src, grp) entry's prune table.
910 *
911 * Determines whether further packets still have to be sent down that vif.
912 *
913 * Determines whether a corresponding prune has to be sent upstream.
914 */
915void
916accept_prune(u_int32_t src, u_int32_t dst, char *p, int datalen)
917{
918    u_int32_t prun_src;
919    u_int32_t prun_grp;
920    u_int32_t prun_tmr;
921    vifi_t vifi;
922    int i;
923    int stop_sending;
924    struct rtentry *r;
925    struct gtable *g;
926    struct ptable *pt;
927    struct listaddr *vr;
928
929    /* Don't process any prunes if router is not pruning */
930    if (pruning == 0)
931	return;
932
933    if ((vifi = find_vif(src, dst)) == NO_VIF) {
934	logit(LOG_INFO, 0,
935	    "ignoring prune report from non-neighbor %s",
936	    inet_fmt(src, s1));
937	return;
938    }
939
940    /* Check if enough data is present */
941    if (datalen < 12) {
942	logit(LOG_WARNING, 0,
943	    "non-decipherable prune from %s",
944	    inet_fmt(src, s1));
945	return;
946    }
948
949    for (i = 0; i < 4; i++)
950	((char *)&prun_src)[i] = *p++;
951    for (i = 0; i < 4; i++)
952	((char *)&prun_grp)[i] = *p++;
953    for (i = 0; i < 4; i++)
954	((char *)&prun_tmr)[i] = *p++;
955    prun_tmr = ntohl(prun_tmr);
956
957    logit(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
958	inet_fmt(src, s1), vifi,
959	inet_fmt(prun_src, s2), inet_fmt(prun_grp, s3), prun_tmr);
960
961    /*
962     * Find the subnet for the prune
963     */
964    if (find_src_grp(prun_src, 0, prun_grp)) {
965	g = gtp ? gtp->gt_gnext : kernel_table;
966	r = g->gt_route;
967
968	if (!VIFM_ISSET(vifi, r->rt_children)) {
969	    logit(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
970		inet_fmt(src, s1), inet_fmt(prun_src, s2),
971		inet_fmt(prun_grp, s3));
972	    return;
973	}
974	if (VIFM_ISSET(vifi, g->gt_scope)) {
975	    logit(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
976		inet_fmt(src, s1), inet_fmt(prun_src, s2),
977		inet_fmt(prun_grp, s3));
978	    return;
979	}
980	if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
981	    /*
982	     * If it's about to expire, then it's only still around because
983	     * of timer granularity, so don't warn about it.
984	     */
985	    if (pt->pt_timer > 10) {
986		logit(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
987		    "duplicate prune received on vif",
988		    vifi, inet_fmt(src, s1), inet_fmt(prun_src, s2),
989		    inet_fmt(prun_grp, s3), prun_tmr,
990		    "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
991	    }
992	    pt->pt_timer = prun_tmr;
993	} else {
994	    /* allocate space for the prune structure */
995	    pt = (struct ptable *)(malloc(sizeof(struct ptable)));
996	    if (pt == NULL)
997	      logit(LOG_ERR, 0, "pt: ran out of memory");
998
999	    pt->pt_vifi = vifi;
1000	    pt->pt_router = src;
1001	    pt->pt_timer = prun_tmr;
1002
1003	    pt->pt_next = g->gt_pruntbl;
1004	    g->gt_pruntbl = pt;
1005	}
1006
1007	/* Refresh the group's lifetime */
1008	g->gt_timer = CACHE_LIFETIME(cache_lifetime);
1009	if (g->gt_timer < prun_tmr)
1010	    g->gt_timer = prun_tmr;
1011
1012	/*
1013	 * check if any more packets need to be sent on the
1014	 * vif which sent this message
1015	 */
1016	stop_sending = 1;
1017	for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
1018	  if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL)  {
1019	      stop_sending = 0;
1020	      break;
1021	  }
1022
1023	if (stop_sending && !grplst_mem(vifi, prun_grp)) {
1024	    VIFM_CLR(vifi, g->gt_grpmems);
1025	    logit(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
1026		inet_fmts(r->rt_origin, r->rt_originmask, s1),
1027		inet_fmt(g->gt_mcastgrp, s2), vifi, g->gt_grpmems);
1028
1029	    prun_add_ttls(g);
1030	    update_kernel(g);
1031#ifdef RSRR
1032	    /* Send route change notification to reservation protocol. */
1033	    rsrr_cache_send(g,1);
1034#endif /* RSRR */
1035	}
1036
1037	/*
1038	 * check if all the child routers have expressed no interest
1039	 * in this group and if this group does not exist in the
1040	 * interface
1041	 * Send a prune message then upstream
1042	 */
1043	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
1044	    send_prune(g);
1045	}
1046    } else {
1047	/*
1048	 * There is no kernel entry for this group.  Therefore, we can
1049	 * simply ignore the prune, as we are not forwarding this traffic
1050	 * downstream.
1051	 */
1052	logit(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
1053	    "prune message received with no kernel entry for",
1054	    inet_fmt(prun_src, s1), inet_fmt(prun_grp, s2),
1055	    prun_tmr, inet_fmt(src, s3));
1056	return;
1057    }
1058}
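
/*
 * Note that a prune only stops forwarding on a vif once every known
 * router neighbor on that vif has pruned and no local group members
 * remain there; as long as one neighbor has not pruned, or a member is
 * still present, the vif stays in gt_grpmems and traffic keeps flowing.
 */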
1059
1060/*
1061 * Checks if this mcastgrp is present in the kernel table
1062 * If so and if a prune was sent, it sends a graft upwards
1063 */
1064void
1065chkgrp_graft(vifi_t vifi, u_int32_t mcastgrp)
1066{
1067    struct rtentry *r;
1068    struct gtable *g;
1069
1070    for (g = kernel_table; g; g = g->gt_gnext) {
1071	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))
1072	    break;
1073
1074	r = g->gt_route;
1075	if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
1076	    if (g->gt_prsent_timer) {
1077		VIFM_SET(vifi, g->gt_grpmems);
1078
1079		/*
1080		 * If the vif that was joined was a scoped vif,
1081		 * ignore it; don't graft back
1082		 */
1083		g->gt_grpmems &= ~g->gt_scope;
1084		if (g->gt_grpmems == 0)
1085		    continue;
1086
1087		/* set the flag for graft retransmission */
1088		g->gt_grftsnt = 1;
1089
1090		/* send graft upwards */
1091		send_graft(g);
1092
1093		/* reset the prune timer and update cache timer*/
1094		g->gt_prsent_timer = 0;
1095		g->gt_timer = max_prune_lifetime;
1096
1097		logit(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
1098		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
1099		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1100
1101		prun_add_ttls(g);
1102		update_kernel(g);
1103#ifdef RSRR
1104		/* Send route change notification to reservation protocol. */
1105		rsrr_cache_send(g,1);
1106#endif /* RSRR */
1107	    }
1108    }
1109}
1110
1111/* Determine the multicast group and source pair for the graft,
1112 * and check whether a kernel entry exists for that pair.
1113 *
1114 * If it does, determine whether a prune was sent upstream.
1115 * If a prune was sent upstream, send a graft upstream and send an
1116 * ack downstream.
1117 *
1118 * If no prune was sent upstream, set the forwarding bit
1119 * for this interface and send an ack downstream.
1120 *
1121 * If no entry exists for this group, just send an ack downstream.
1122 */
1123void
1124accept_graft(u_int32_t src, u_int32_t dst, char *p, int datalen)
1125{
1126    vifi_t	vifi;
1127    u_int32_t	graft_src;
1128    u_int32_t	graft_grp;
1129    int		i;
1130    struct rtentry *r;
1131    struct gtable *g;
1132    struct ptable *pt, **ptnp;
1133
1134    if ((vifi = find_vif(src, dst)) == NO_VIF) {
1135	logit(LOG_INFO, 0,
1136	    "ignoring graft from non-neighbor %s",
1137	    inet_fmt(src, s1));
1138	return;
1139    }
1140
1141    if (datalen < 8) {
1142	logit(LOG_WARNING, 0,
1143	    "received non-decipherable graft from %s",
1144	    inet_fmt(src, s1));
1145	return;
1146    }
1147
1148    for (i = 0; i < 4; i++)
1149	((char *)&graft_src)[i] = *p++;
1150    for (i = 0; i < 4; i++)
1151	((char *)&graft_grp)[i] = *p++;
1152
1153    logit(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
1154	inet_fmt(src, s1), vifi,
1155	inet_fmt(graft_src, s2), inet_fmt(graft_grp, s3));
1156
1157    /*
1158     * Find the subnet for the graft
1159     */
1160    if (find_src_grp(graft_src, 0, graft_grp)) {
1161	g = gtp ? gtp->gt_gnext : kernel_table;
1162	r = g->gt_route;
1163
1164	if (VIFM_ISSET(vifi, g->gt_scope)) {
1165	    logit(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
1166		inet_fmt(src, s1), inet_fmt(graft_src, s2),
1167		inet_fmt(graft_grp, s3));
1168	    return;
1169	}
1170
1171	ptnp = &g->gt_pruntbl;
1172	while ((pt = *ptnp) != NULL) {
1173	    if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
1174		*ptnp = pt->pt_next;
1175		free(pt);
1176
1177		VIFM_SET(vifi, g->gt_grpmems);
1178		logit(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
1179		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
1180		    inet_fmt(g->gt_mcastgrp, s2), g->gt_grpmems);
1181
1182		prun_add_ttls(g);
1183		update_kernel(g);
1184#ifdef RSRR
1185		/* Send route change notification to reservation protocol. */
1186		rsrr_cache_send(g,1);
1187#endif /* RSRR */
1188		break;
1189	    } else {
1190		ptnp = &pt->pt_next;
1191	    }
1192	}
1193
1194	/* send ack downstream */
1195	send_graft_ack(dst, src, graft_src, graft_grp);
1196	g->gt_timer = max_prune_lifetime;
1197
1198	if (g->gt_prsent_timer) {
1199	    /* set the flag for graft retransmission */
1200	    g->gt_grftsnt = 1;
1201
1202	    /* send graft upwards */
1203	    send_graft(g);
1204
1205	    /* reset the prune sent timer */
1206	    g->gt_prsent_timer = 0;
1207	}
1208    } else {
1209	/*
1210	 * We have no state for the source and group in question.
1211	 * We can simply acknowledge the graft, since we know
1212	 * that we have no prune state, and grafts are requests
1213	 * to remove prune state.
1214	 */
1215	send_graft_ack(dst, src, graft_src, graft_grp);
1216	logit(LOG_DEBUG, 0, "%s (%s %s) from %s",
1217	    "graft received with no kernel entry for",
1218	    inet_fmt(graft_src, s1), inet_fmt(graft_grp, s2),
1219	    inet_fmt(src, s3));
1220	return;
1221    }
1222}
1223
1224/*
1225 * First find out which (src, grp) entry is involved,
1226 * then determine if a graft was sent for it.
1227 * If no graft was sent, ignore the message.
1228 * If a graft was sent and the ack is from the right
1229 * source, clear the graft retransmission flag so that we don't
1230 * have to send a graft again.
1231 */
1232void
1233accept_g_ack(u_int32_t src, u_int32_t dst, char *p, int datalen)
1234{
1235    struct gtable *g;
1236    vifi_t	vifi;
1237    u_int32_t	grft_src;
1238    u_int32_t	grft_grp;
1239    int		i;
1240
1241    if ((vifi = find_vif(src, dst)) == NO_VIF) {
1242	logit(LOG_INFO, 0,
1243	    "ignoring graft ack from non-neighbor %s",
1244	    inet_fmt(src, s1));
1245	return;
1246    }
1247
1248    if (datalen < 8) {
1249	logit(LOG_WARNING, 0,
1250	    "received non-decipherable graft ack from %s",
1251	    inet_fmt(src, s1));
1252	return;
1253    }
1254
1255    for (i = 0; i < 4; i++)
1256	((char *)&grft_src)[i] = *p++;
1257    for (i = 0; i < 4; i++)
1258	((char *)&grft_grp)[i] = *p++;
1259
1260    logit(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
1261	inet_fmt(src, s1), vifi,
1262	inet_fmt(grft_src, s2), inet_fmt(grft_grp, s3));
1263
1264    /*
1265     * Find the subnet for the graft ack
1266     */
1267    if (find_src_grp(grft_src, 0, grft_grp)) {
1268	g = gtp ? gtp->gt_gnext : kernel_table;
1269	g->gt_grftsnt = 0;
1270    } else {
1271	logit(LOG_WARNING, 0, "%s (%s, %s) from %s",
1272	    "rcvd graft ack with no kernel entry for",
1273	    inet_fmt(grft_src, s1), inet_fmt(grft_grp, s2),
1274	    inet_fmt(src, s3));
1275	return;
1276    }
1277}
1278
1279
1280/*
1281 * free all prune entries and kernel routes
1282 * normally, this should inform the kernel that all of its routes
1283 * are going away, but this is only called by restart(), which is
1284 * about to call MRT_DONE which does that anyway.
1285 */
1286void
1287free_all_prunes(void)
1288{
1289    register struct rtentry *r;
1290    register struct gtable *g, *prev_g;
1291    register struct stable *s, *prev_s;
1292    register struct ptable *p, *prev_p;
1293
1294    for (r = routing_table; r; r = r->rt_next) {
1295	g = r->rt_groups;
1296	while (g) {
1297	    s = g->gt_srctbl;
1298	    while (s) {
1299		prev_s = s;
1300		s = s->st_next;
1301		free(prev_s);
1302	    }
1303
1304	    p = g->gt_pruntbl;
1305	    while (p) {
1306		prev_p = p;
1307		p = p->pt_next;
1308		free(prev_p);
1309	    }
1310
1311	    prev_g = g;
1312	    g = g->gt_next;
1313	    free(prev_g);
1314	}
1315	r->rt_groups = NULL;
1316    }
1317    kernel_table = NULL;
1318
1319    g = kernel_no_route;
1320    while (g) {
1321	if (g->gt_srctbl)
1322	    free(g->gt_srctbl);
1323
1324	prev_g = g;
1325	g = g->gt_next;
1326	free(prev_g);
1327    }
1328    kernel_no_route = NULL;
1329}
1330
1331/*
1332 * When a new route is created, search
1333 * a) The less-specific part of the routing table
1334 * b) The route-less kernel table
1335 * for sources that the new route might want to handle.
1336 *
1337 * "Inheriting" these sources might be cleanest, but simply deleting
1338 * them is easier; the kernel will simply re-request them as needed.
1339 */
1340void
1341steal_sources(struct rtentry *rt)
1342{
1343    register struct rtentry *rp;
1344    register struct gtable *gt, **gtnp;
1345    register struct stable *st, **stnp;
1346
1347    for (rp = rt->rt_next; rp; rp = rp->rt_next) {
1348	if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
1349	    logit(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
1350		inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1351		inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1352	    for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
1353		stnp = &gt->gt_srctbl;
1354		while ((st = *stnp) != NULL) {
1355		    if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
1356			logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1357			    inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1358			    inet_fmt(st->st_origin, s3),
1359			    inet_fmt(gt->gt_mcastgrp, s4),
1360			    inet_fmts(rp->rt_origin, rp->rt_originmask, s2));
1361			if (k_del_rg(st->st_origin, gt) < 0) {
1362			    logit(LOG_WARNING, errno, "%s (%s, %s)",
1363				"steal_sources trying to delete",
1364				inet_fmt(st->st_origin, s1),
1365				inet_fmt(gt->gt_mcastgrp, s2));
1366			}
1367			*stnp = st->st_next;
1368			kroutes--;
1369			free(st);
1370		    } else {
1371			stnp = &st->st_next;
1372		    }
1373		}
1374	    }
1375	}
1376    }
1377
1378    gtnp = &kernel_no_route;
1379    while ((gt = *gtnp) != NULL) {
1380	if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
1381				    == rt->rt_origin)) {
1382	    logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
1383		inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1384		inet_fmt(gt->gt_srctbl->st_origin, s3),
1385		inet_fmt(gt->gt_mcastgrp, s4),
1386		"no_route table");
1387	    if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1388		logit(LOG_WARNING, errno, "%s (%s %s)",
1389		    "steal_sources trying to delete",
1390		    inet_fmt(gt->gt_srctbl->st_origin, s1),
1391		    inet_fmt(gt->gt_mcastgrp, s2));
1392	    }
1393	    kroutes--;
1394	    free(gt->gt_srctbl);
1395	    *gtnp = gt->gt_next;
1396	    if (gt->gt_next)
1397		gt->gt_next->gt_prev = gt->gt_prev;
1398	    free(gt);
1399	} else {
1400	    gtnp = &gt->gt_next;
1401	}
1402    }
1403}
1404
1405/*
1406 * Advance the timers on all the cache entries.
1407 * If there are any entries whose timers have expired,
1408 * remove these entries from the kernel cache.
1409 */
1410void
1411age_table_entry(void)
1412{
1413    struct rtentry *r;
1414    struct gtable *gt, **gtnptr;
1415    struct stable *st, **stnp;
1416    struct ptable *pt, **ptnp;
1417    struct sioc_sg_req sg_req;
1418
1419    logit(LOG_DEBUG, 0, "ageing entries");
1420
1421    gtnptr = &kernel_table;
1422    while ((gt = *gtnptr) != NULL) {
1423	r = gt->gt_route;
1424
1425	/* advance the timer for the kernel entry */
1426	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1427
1428	/* decrement prune timer if need be */
1429	if (gt->gt_prsent_timer > 0) {
1430	    gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
1431	    if (gt->gt_prsent_timer <= 0) {
1432		logit(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
1433		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
1434		    inet_fmt(gt->gt_mcastgrp, s2));
1435		gt->gt_prsent_timer = -1;
1436	    }
1437	}
1438
1439	/* retransmit graft if graft sent flag is still set */
1440	if (gt->gt_grftsnt) {
1441	    register int y;
1442	    CHK_GS(gt->gt_grftsnt++, y);
1443	    if (y)
1444		send_graft(gt);
1445	}
1446
1447	/*
1448	 * Age prunes
1449	 *
1450	 * If a prune expires, forward again on that vif.
1451	 */
1452	ptnp = &gt->gt_pruntbl;
1453	while ((pt = *ptnp) != NULL) {
1454	    if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
1455		logit(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
1456		    inet_fmts(r->rt_origin, r->rt_originmask, s1),
1457		    inet_fmt(gt->gt_mcastgrp, s2),
1458		    inet_fmt(pt->pt_router, s3),
1459		    pt->pt_vifi);
1460
1461		expire_prune(pt->pt_vifi, gt);
1462
1463		/* remove the router's prune entry and await new one */
1464		*ptnp = pt->pt_next;
1465		free(pt);
1466	    } else {
1467		ptnp = &pt->pt_next;
1468	    }
1469	}
1470
1471	/*
1472	 * If the cache entry has expired, delete source table entries for
1473	 * silent sources.  If there are no source entries left, and there
1474	 * are no downstream prunes, then the entry is deleted.
1475	 * Otherwise, the cache entry's timer is refreshed.
1476	 */
1477	if (gt->gt_timer <= 0) {
1478	    /* Check for traffic before deleting source entries */
1479	    sg_req.grp.s_addr = gt->gt_mcastgrp;
1480	    stnp = &gt->gt_srctbl;
1481	    while ((st = *stnp) != NULL) {
1482		sg_req.src.s_addr = st->st_origin;
1483		if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
1484		    logit(LOG_WARNING, errno, "%s (%s %s)",
1485			"age_table_entry: SIOCGETSGCNT failing for",
1486			inet_fmt(st->st_origin, s1),
1487			inet_fmt(gt->gt_mcastgrp, s2));
1488		    /* Make sure it gets deleted below */
1489		    sg_req.pktcnt = st->st_pktcnt;
1490		}
1491		if (sg_req.pktcnt == st->st_pktcnt) {
1492		    *stnp = st->st_next;
1493		    logit(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
1494			inet_fmt(st->st_origin, s1),
1495			inet_fmt(gt->gt_mcastgrp, s2));
1496		    if (k_del_rg(st->st_origin, gt) < 0) {
1497			logit(LOG_WARNING, errno,
1498			    "age_table_entry trying to delete (%s %s)",
1499			    inet_fmt(st->st_origin, s1),
1500			    inet_fmt(gt->gt_mcastgrp, s2));
1501		    }
1502		    kroutes--;
1503		    free(st);
1504		} else {
1505		    st->st_pktcnt = sg_req.pktcnt;
1506		    stnp = &st->st_next;
1507		}
1508	    }
1509
1510	    /*
1511	     * Retain the group entry if we have downstream prunes or if
1512	     * there is at least one source in the list that still has
1513	     * traffic, or if our upstream prune timer is running.
1514	     */
1515	    if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
1516		gt->gt_prsent_timer > 0) {
1517		gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
1518		if (gt->gt_prsent_timer == -1)
1519		    if (gt->gt_grpmems == 0)
1520			send_prune(gt);
1521		    else
1522			gt->gt_prsent_timer = 0;
1523		gtnptr = &gt->gt_gnext;
1524		continue;
1525	    }
1526
1527	    logit(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
1528		inet_fmts(r->rt_origin, r->rt_originmask, s1),
1529		inet_fmt(gt->gt_mcastgrp, s2));
1530
1531	    if (gt->gt_prev)
1532		gt->gt_prev->gt_next = gt->gt_next;
1533	    else
1534		gt->gt_route->rt_groups = gt->gt_next;
1535	    if (gt->gt_next)
1536		gt->gt_next->gt_prev = gt->gt_prev;
1537
1538	    if (gt->gt_gprev) {
1539		gt->gt_gprev->gt_gnext = gt->gt_gnext;
1540		gtnptr = &gt->gt_gprev->gt_gnext;
1541	    } else {
1542		kernel_table = gt->gt_gnext;
1543		gtnptr = &kernel_table;
1544	    }
1545	    if (gt->gt_gnext)
1546		gt->gt_gnext->gt_gprev = gt->gt_gprev;
1547
1548#ifdef RSRR
1549	    /* Send route change notification to reservation protocol. */
1550	    rsrr_cache_send(gt,0);
1551	    rsrr_cache_clean(gt);
1552#endif /* RSRR */
1553	    free((char *)gt);
1554	} else {
1555	    if (gt->gt_prsent_timer == -1)
1556		if (gt->gt_grpmems == 0)
1557		    send_prune(gt);
1558		else
1559		    gt->gt_prsent_timer = 0;
1560	    gtnptr = &gt->gt_gnext;
1561	}
1562    }
1563
1564    /*
1565     * When traversing the no_route table, the decision is much easier.
1566     * Just delete it if it has timed out.
1567     */
1568    gtnptr = &kernel_no_route;
1569    while ((gt = *gtnptr) != NULL) {
1570	/* advance the timer for the kernel entry */
1571	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;
1572
1573	if (gt->gt_timer < 0) {
1574	    if (gt->gt_srctbl) {
1575		if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
1576		    logit(LOG_WARNING, errno, "%s (%s %s)",
1577			"age_table_entry trying to delete no-route",
1578			inet_fmt(gt->gt_srctbl->st_origin, s1),
1579			inet_fmt(gt->gt_mcastgrp, s2));
1580		}
1581		free(gt->gt_srctbl);
1582	    }
1583	    *gtnptr = gt->gt_next;
1584	    if (gt->gt_next)
1585		gt->gt_next->gt_prev = gt->gt_prev;
1586
1587	    free((char *)gt);
1588	} else {
1589	    gtnptr = &gt->gt_next;
1590	}
1591    }
1592}
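
/*
 * All of the timers above are decremented in units of
 * ROUTE_MAX_REPORT_DELAY, so prune, graft and cache lifetimes are only
 * accurate to that granularity and rely on the caller invoking
 * age_table_entry() once per that interval.
 */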
1593
1594/*
1595 * Modify the kernel to forward packets when one or multiple prunes that
1596 * were received on the vif given by vifi, for the group given by gt,
1597 * have expired.
1598 */
1599static void
1600expire_prune(vifi_t vifi, struct gtable *gt)
1601{
1602    /*
1603     * No need to send a graft, any prunes that we sent
1604     * will expire before any prunes that we have received.
1605     */
1606    if (gt->gt_prsent_timer > 0) {
1607        logit(LOG_DEBUG, 0, "prune expired with %d left on %s",
1608		gt->gt_prsent_timer, "prsent_timer");
1609        gt->gt_prsent_timer = 0;
1610    }
1611
1612    /* modify the kernel entry to forward packets */
1613    if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
1614        struct rtentry *rt = gt->gt_route;
1615        VIFM_SET(vifi, gt->gt_grpmems);
1616        logit(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
1617	inet_fmts(rt->rt_origin, rt->rt_originmask, s1),
1618	inet_fmt(gt->gt_mcastgrp, s2), gt->gt_grpmems, vifi);
1619
1620        prun_add_ttls(gt);
1621        update_kernel(gt);
1622#ifdef RSRR
1623        /* Send route change notification to reservation protocol. */
1624        rsrr_cache_send(gt,1);
1625#endif /* RSRR */
1626    }
1627}
1628
1629
1630static char *
1631scaletime(u_long t)
1632{
1633    static char buf1[5];
1634    static char buf2[5];
1635    static char *buf=buf1;
1636    char s;
1637    char *p;
1638
1639    p = buf;
1640    if (buf == buf1)
1641	buf = buf2;
1642    else
1643	buf = buf1;
1644
1645    if (t < 120) {
1646	s = 's';
1647    } else if (t < 3600) {
1648	t /= 60;
1649	s = 'm';
1650    } else if (t < 86400) {
1651	t /= 3600;
1652	s = 'h';
1653    } else if (t < 864000) {
1654	t /= 86400;
1655	s = 'd';
1656    } else {
1657	t /= 604800;
1658	s = 'w';
1659    }
1660    if (t > 999)
1661	return "*** ";
1662
1663    snprintf(p, 5, "%3d%c", (int)t, s);
1664
1665    return p;
1666}
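
/*
 * scaletime() alternates between two static buffers so that a single
 * fprintf() may safely use two scaled times at once, as dump_cache()
 * does below.  For example, 300 seconds formats as "  5m" and 90000
 * seconds as "  1d".
 */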
1667
1668/*
1669 * Print the contents of the cache table on file 'fp2'.
1670 */
1671void
1672dump_cache(FILE *fp2)
1673{
1674    register struct rtentry *r;
1675    register struct gtable *gt;
1676    register struct stable *st;
1677    register vifi_t i;
1678    register time_t thyme = time(0);
1679
1680    fprintf(fp2,
1681	    "Multicast Routing Cache Table (%d entries)\n%s", kroutes,
1682    " Origin             Mcast-group     CTmr  Age Ptmr IVif Forwvifs\n");
1683
1684    for (gt = kernel_no_route; gt; gt = gt->gt_next) {
1685	if (gt->gt_srctbl) {
1686	    fprintf(fp2, " %-18s %-15s %-4s %-4s    - -1\n",
1687		inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff, s1),
1688		inet_fmt(gt->gt_mcastgrp, s2), scaletime(gt->gt_timer),
1689		scaletime(thyme - gt->gt_ctime));
1690	    fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin, s1));
1691	}
1692    }
1693
1694    for (gt = kernel_table; gt; gt = gt->gt_gnext) {
1695	r = gt->gt_route;
1696	fprintf(fp2, " %-18s %-15s",
1697	    inet_fmts(r->rt_origin, r->rt_originmask, s1),
1698	    inet_fmt(gt->gt_mcastgrp, s2));
1699
1700	fprintf(fp2, " %-4s", scaletime(gt->gt_timer));
1701
1702	fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
1703			gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :
1704					      "   -");
1705
1706	fprintf(fp2, "%2u%c%c ", r->rt_parent,
1707	    gt->gt_prsent_timer ? 'P' : ' ',
1708	    VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');
1709
1710	for (i = 0; i < numvifs; ++i) {
1711	    if (VIFM_ISSET(i, gt->gt_grpmems))
1712		fprintf(fp2, " %u ", i);
1713	    else if (VIFM_ISSET(i, r->rt_children) &&
1714		     !VIFM_ISSET(i, r->rt_leaves))
1715		fprintf(fp2, " %u%c", i,
1716			VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');
1717	}
1718	fprintf(fp2, "\n");
1719	for (st = gt->gt_srctbl; st; st = st->st_next) {
1720	    fprintf(fp2, ">%s\n", inet_fmt(st->st_origin, s1));
1721	}
1722#ifdef DEBUG_PRUNES
1723	for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
1724	    fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router, s1),
1725		pt->pt_vifi, pt->pt_timer);
1726	}
1727#endif
1728    }
1729}
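
/*
 * An illustrative cache dump therefore looks roughly like
 *
 *  Origin             Mcast-group     CTmr  Age Ptmr IVif Forwvifs
 *  192.168.1/24       224.2.127.254   5m    2h   1m   0P   1p 2
 *  >192.168.1.17
 *
 * where 'P' after the incoming vif means a prune has been sent upstream,
 * 'B'/'b' mark scope boundaries, a trailing 'p' marks a child vif not
 * currently forwarded on, and each '>' line lists one source seen for
 * the entry.  (The addresses above are made up for illustration.)
 */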
1730
1731/*
1732 * Traceroute function which returns traceroute replies to the requesting
1733 * router.  Also forwards the request upstream, toward the traced source.
1734 * NOTE: u_int no is narrowed to u_char
1735 */
1736void
1737accept_mtrace(u_int32_t src, u_int32_t dst, u_int32_t group,
1738    char *data, u_int no, int datalen)
1739{
1740    u_char type;
1741    struct rtentry *rt;
1742    struct gtable *gt;
1743    struct tr_query *qry;
1744    struct tr_resp  *resp;
1745    int vifi;
1746    char *p;
1747    int rcount;
1748    int errcode = TR_NO_ERR;
1749    int resptype;
1750    struct timeval tp;
1751    struct sioc_vif_req v_req;
1752    struct sioc_sg_req sg_req;
1753
1754    /* Remember qid across invocations */
1755    static u_int32_t oqid = 0;
1756
1757    /* timestamp the request/response */
1758    gettimeofday(&tp, 0);
1759
1760    /*
1761     * Check if it is a query or a response
1762     */
1763    if (datalen == QLEN) {
1764	type = QUERY;
1765	logit(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
1766	    inet_fmt(src, s1), inet_fmt(dst, s2));
1767    }
1768    else if ((datalen - QLEN) % RLEN == 0) {
1769	type = RESP;
1770	logit(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
1771	    inet_fmt(src, s1), inet_fmt(dst, s2));
1772	if (IN_MULTICAST(ntohl(dst))) {
1773	    logit(LOG_DEBUG, 0, "Dropping multicast response");
1774	    return;
1775	}
1776    }
1777    else {
1778	logit(LOG_WARNING, 0, "%s from %s to %s",
1779	    "Non decipherable traceroute request received",
1780	    inet_fmt(src, s1), inet_fmt(dst, s2));
1781	return;
1782    }
1783
1784    qry = (struct tr_query *)data;
1785
1786    /*
1787     * if it is a packet with all reports filled, drop it
1788     */
1789    if ((rcount = (datalen - QLEN)/RLEN) == no) {
1790	logit(LOG_DEBUG, 0, "packet with all reports filled in");
1791	return;
1792    }
1793
1794    logit(LOG_DEBUG, 0, "s: %s g: %s d: %s ", inet_fmt(qry->tr_src, s1),
1795	    inet_fmt(group, s2), inet_fmt(qry->tr_dst, s3));
1796    logit(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
1797	    inet_fmt(qry->tr_raddr, s1));
1798    logit(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);
1799
1800    /* determine the routing table entry for this traceroute */
1801    rt = determine_route(qry->tr_src);
1802    if (rt) {
1803	logit(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
1804		rt->rt_parent, inet_fmt(rt->rt_gateway, s1), rt->rt_metric);
1805	logit(LOG_DEBUG, 0, "rt origin %s",
1806		inet_fmts(rt->rt_origin, rt->rt_originmask, s1));
1807    } else
1808	logit(LOG_DEBUG, 0, "...no route");
1809
1810    /*
1811     * Query type packet - check if rte exists
1812     * Check if the query destination is a vif connected to me.
1813     * and if so, whether I should start response back
1814     */
1815    if (type == QUERY) {
1816	if (oqid == qry->tr_qid) {
1817	    /*
1818	     * If the multicast router is a member of the group being
1819	     * queried, and the query is multicasted, then the router can
1820	     * receive multiple copies of the same query.  If we have already
1821	     * replied to this traceroute, just ignore it this time.
1822	     *
1823	     * This is not a total solution, but since if this fails you
1824	     * only get N copies, N <= the number of interfaces on the router,
1825	     * it is not fatal.
1826	     */
1827	    logit(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");
1828	    return;
1829	}
1830
1831	if (rt == NULL) {
1832	    logit(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
1833		   inet_fmt(qry->tr_src, s1));
1834	    if (IN_MULTICAST(ntohl(dst)))
1835		return;
1836	}
1837	vifi = find_vif(qry->tr_dst, 0);
1838
1839	if (vifi == NO_VIF) {
1840	    /* The traceroute destination is not on one of my subnet vifs. */
1841	    logit(LOG_DEBUG, 0, "Destination %s not an interface",
1842		   inet_fmt(qry->tr_dst, s1));
1843	    if (IN_MULTICAST(ntohl(dst)))
1844		return;
1845	    errcode = TR_WRONG_IF;
1846	} else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
1847	    logit(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
1848		   inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
1849	    if (IN_MULTICAST(ntohl(dst)))
1850		return;
1851	    errcode = TR_WRONG_IF;
1852	}
1853    }
1854    else {
1855	/*
1856	 * determine which interface the packet came in on
1857	 * RESP packets travel hop-by-hop so this either traversed
1858	 * a tunnel or came from a directly attached mrouter.
1859	 */
1860	if ((vifi = find_vif(src, dst)) == NO_VIF) {
1861	    logit(LOG_DEBUG, 0, "Wrong interface for packet");
1862	    errcode = TR_WRONG_IF;
1863	}
1864    }
1865
1866    /* Now that we've decided to send a response, save the qid */
1867    oqid = qry->tr_qid;
1868
1869    logit(LOG_DEBUG, 0, "Sending traceroute response");
1870
1871    /* copy the packet to the sending buffer */
1872    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;
1873
1874    bcopy(data, p, datalen);
1875
1876    p += datalen;
1877
1878    /*
1879     * If there is no room to insert our reply, coopt the previous hop
1880     * error indication to relay this fact.
1881     */
1882    if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
1883	resp = (struct tr_resp *)p - 1;
1884	resp->tr_rflags = TR_NO_SPACE;
1885	rt = NULL;
1886	goto sendit;
1887    }
1888
1889    /*
1890     * fill in initial response fields
1891     */
1892    resp = (struct tr_resp *)p;
1893    bzero(resp, sizeof(struct tr_resp));
1894    datalen += RLEN;
1895
1896    resp->tr_qarr    = htonl((tp.tv_sec + JAN_1970) << 16) +
1897				((tp.tv_usec >> 4) & 0xffff);
1898
1899    resp->tr_rproto  = PROTO_DVMRP;
1900    if (errcode != TR_NO_ERR) {
1901	resp->tr_rflags	 = errcode;
1902	rt = NULL;	/* hack to enforce send straight to requestor */
1903	goto sendit;
1904    }
1905    resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
1906    resp->tr_fttl    = uvifs[vifi].uv_threshold;
1907    resp->tr_rflags  = TR_NO_ERR;
1908
1909    /*
1910     * obtain # of packets out on interface
1911     */
1912    v_req.vifi = vifi;
1913    if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
1914	resp->tr_vifout  =  htonl(v_req.ocount);
1915
1916    /*
1917     * fill in scoping & pruning information
1918     */
1919    if (rt) {
1920	for (gt = rt->rt_groups; gt; gt = gt->gt_next)
1921	    if (gt->gt_mcastgrp >= group)
1922		break;
1923    } else {
1924	gt = NULL;
1925    }
1926
1927    if (gt && gt->gt_mcastgrp == group) {
1928	sg_req.src.s_addr = qry->tr_src;
1929	sg_req.grp.s_addr = group;
1930	if (ioctl(udp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
1931	    resp->tr_pktcnt = htonl(sg_req.pktcnt);
1932
1933	if (VIFM_ISSET(vifi, gt->gt_scope))
1934	    resp->tr_rflags = TR_SCOPED;
1935	else if (gt->gt_prsent_timer)
1936	    resp->tr_rflags = TR_PRUNED;
1937	else if (!VIFM_ISSET(vifi, gt->gt_grpmems))
1938	    if (VIFM_ISSET(vifi, rt->rt_children) &&
1939		!VIFM_ISSET(vifi, rt->rt_leaves))
1940		resp->tr_rflags = TR_OPRUNED;
1941	    else
1942		resp->tr_rflags = TR_NO_FWD;
1943    } else {
1944	if (scoped_addr(vifi, group))
1945	    resp->tr_rflags = TR_SCOPED;
1946	else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
1947	    resp->tr_rflags = TR_NO_FWD;
1948    }
1949
1950    /*
1951     *  if no rte exists, set NO_RTE error
1952     */
1953    if (rt == NULL) {
1954	src = dst;		/* the dst address of resp. pkt */
1955	resp->tr_inaddr   = 0;
1956	resp->tr_rflags   = TR_NO_RTE;
1957	resp->tr_rmtaddr  = 0;
1958    } else {
1959	/* get # of packets in on interface */
1960	v_req.vifi = rt->rt_parent;
1961	if (ioctl(udp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
1962	    resp->tr_vifin = htonl(v_req.icount);
1963
1964	MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
1965	src = uvifs[rt->rt_parent].uv_lcl_addr;
1966	resp->tr_inaddr = src;
1967	resp->tr_rmtaddr = rt->rt_gateway;
1968	if (!VIFM_ISSET(vifi, rt->rt_children)) {
1969	    logit(LOG_DEBUG, 0, "Destination %s not on forwarding tree for src %s",
1970		   inet_fmt(qry->tr_dst, s1), inet_fmt(qry->tr_src, s2));
1971	    resp->tr_rflags = TR_WRONG_IF;
1972	}
1973	if (rt->rt_metric >= UNREACHABLE) {
1974	    resp->tr_rflags = TR_NO_RTE;
1975	    /* Hack to send reply directly */
1976	    rt = NULL;
1977	}
1978    }
1979
1980sendit:
1981    /*
1982     * if metric is 1 or no. of reports is 1, send response to requestor
1983     * else send to upstream router.  If the upstream router can't handle
1984     * mtrace, set an error code and send to requestor anyway.
1985     */
1986    logit(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);
1987
1988    if ((rcount + 1 == no) || (rt == NULL) || (rt->rt_metric == 1)) {
1989	resptype = IGMP_MTRACE_REPLY;
1990	dst = qry->tr_raddr;
1991    } else
1992	if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
1993	    dst = qry->tr_raddr;
1994	    resp->tr_rflags = TR_OLD_ROUTER;
1995	    resptype = IGMP_MTRACE_REPLY;
1996	} else {
1997	    dst = rt->rt_gateway;
1998	    resptype = IGMP_MTRACE_QUERY;
1999	}
2000
2001    if (IN_MULTICAST(ntohl(dst))) {
2002	/*
2003	 * Send the reply on a known multicast capable vif.
2004	 * If we don't have one, we can't source any multicasts anyway.
2005	 */
2006	if (phys_vif != -1) {
2007	    logit(LOG_DEBUG, 0, "Sending reply to %s from %s",
2008		inet_fmt(dst, s1), inet_fmt(uvifs[phys_vif].uv_lcl_addr, s2));
2009	    k_set_ttl(qry->tr_rttl);
2010	    send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
2011		      resptype, no, group,
2012		      datalen);
2013	    k_set_ttl(1);
2014	} else
2015	    logit(LOG_INFO, 0, "No enabled phyints -- %s",
2016			"dropping traceroute reply");
2017    } else {
2018	logit(LOG_DEBUG, 0, "Sending %s to %s from %s",
2019	    resptype == IGMP_MTRACE_REPLY ?  "reply" : "request on",
2020	    inet_fmt(dst, s1), inet_fmt(src, s2));
2021
2022	send_igmp(src, dst,
2023		  resptype, no, group,
2024		  datalen);
2025    }
2026    return;
2027}
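
/*
 * Each hop thus appends one RLEN-byte tr_resp block to the original
 * QLEN-byte query, so a request whose "no" requested report slots are
 * already filled (rcount == no) is dropped, and a complete reply carries
 * one response block for every hop on the path back toward the source.
 */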
2028