/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1983, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "defs.h"

__RCSID("$FreeBSD$");

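/* Bumped after each full broadcast (see rip_bcast() below).  Flash
 * (triggered) updates send only routes whose sequence numbers are at
 * least this recent.
 */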
u_int update_seqno;


/* walk the tree of routes with this for output
 */
static struct {
	struct sockaddr_in to;
	naddr	to_mask;
	naddr	to_net;
	naddr	to_std_mask;
	naddr	to_std_net;
	struct interface *ifp;		/* usually output interface */
	struct auth *a;
	char	metric;			/* adjust metrics by interface */
	int	npackets;
	int	gen_limit;
	u_int	state;
#define	    WS_ST_FLASH	    0x001	/* send only changed routes */
#define	    WS_ST_RIP2_ALL  0x002	/* send full featured RIPv2 */
#define	    WS_ST_AG	    0x004	/* ok to aggregate subnets */
#define	    WS_ST_SUPER_AG  0x008	/* ok to aggregate networks */
#define	    WS_ST_QUERY	    0x010	/* responding to a query */
#define	    WS_ST_TO_ON_NET 0x020	/* sending onto one of our nets */
#define	    WS_ST_DEFAULT   0x040	/* faking a default */
} ws;

/* A buffer for what can be heard by both RIPv1 and RIPv2 listeners */
struct ws_buf v12buf;
static union pkt_buf ripv12_buf;

/* Another for only RIPv2 listeners */
static struct ws_buf v2buf;
static union pkt_buf rip_v2_buf;



void
bufinit(void)
{
	ripv12_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	v12buf.buf = &ripv12_buf.rip;
	v12buf.base = &v12buf.buf->rip_nets[0];

	rip_v2_buf.rip.rip_cmd = RIPCMD_RESPONSE;
	rip_v2_buf.rip.rip_vers = RIPv2;
	v2buf.buf = &rip_v2_buf.rip;
	v2buf.base = &v2buf.buf->rip_nets[0];
}


/* Send the contents of the global buffer via the non-multicast socket
 */
int					/* <0 on failure */
output(enum output_type type,
       struct sockaddr_in *dst,		/* send to here */
       struct interface *ifp,
       struct rip *buf,
       int size)			/* this many bytes */
{
	struct sockaddr_in osin;
	int flags;
	const char *msg;
	int res;
	int soc;
	int serrno;

	assert(ifp != NULL);
	osin = *dst;
	if (osin.sin_port == 0)
		osin.sin_port = htons(RIP_PORT);
#ifdef _HAVE_SIN_LEN
	if (osin.sin_len == 0)
		osin.sin_len = sizeof(osin);
#endif

	soc = rip_sock;
	flags = 0;

	switch (type) {
	case OUT_QUERY:
		msg = "Answer Query";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		break;
	case OUT_UNICAST:
		msg = "Send";
		if (soc < 0)
			soc = ifp->int_rip_sock;
		flags = MSG_DONTROUTE;
		break;
	case OUT_BROADCAST:
		if (ifp->int_if_flags & IFF_POINTOPOINT) {
			msg = "Send";
		} else {
			msg = "Send bcast";
		}
		flags = MSG_DONTROUTE;
		break;
	case OUT_MULTICAST:
		if ((ifp->int_if_flags & (IFF_POINTOPOINT|IFF_MULTICAST)) ==
		    IFF_POINTOPOINT) {
			msg = "Send pt-to-pt";
		} else if (ifp->int_state & IS_DUP) {
			trace_act("abort multicast output via %s"
				  " with duplicate address",
				  ifp->int_name);
			return 0;
		} else {
			msg = "Send mcast";
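			/* Point rip_sock's multicast output at this
			 * interface (selected by index) before sending,
			 * unless it is already aimed there.
			 */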
			if (rip_sock_mcast != ifp) {
				struct ip_mreqn mreqn;

				memset(&mreqn, 0, sizeof(struct ip_mreqn));
				mreqn.imr_ifindex = ifp->int_index;
				if (0 > setsockopt(rip_sock,
						   IPPROTO_IP,
						   IP_MULTICAST_IF,
						   &mreqn,
						   sizeof(mreqn))) {
					serrno = errno;
					LOGERR("setsockopt(rip_sock, "
					       "IP_MULTICAST_IF)");
					errno = serrno;
					ifp = NULL;
					return -1;
				}
				rip_sock_mcast = ifp;
			}
			osin.sin_addr.s_addr = htonl(INADDR_RIP_GROUP);
		}
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
	default:
#ifdef DEBUG
		abort();
#endif
		return -1;
	}

	trace_rip(msg, "to", &osin, ifp, buf, size);

	res = sendto(soc, buf, size, flags,
		     (struct sockaddr *)&osin, sizeof(osin));
	if (res < 0
	    && (ifp == NULL || !(ifp->int_state & IS_BROKE))) {
		serrno = errno;
		msglog("%s sendto(%s%s%s.%d): %s", msg,
		       ifp != NULL ? ifp->int_name : "",
		       ifp != NULL ? ", " : "",
		       inet_ntoa(osin.sin_addr),
		       ntohs(osin.sin_port),
		       strerror(errno));
		errno = serrno;
	}

	return res;
}


/* Find the first key for a packet to send.
 * Try for a key that is eligible and has not expired, but settle for
 * the last key if they have all expired.
 * If no key is ready yet, give up.
 */
struct auth *
find_auth(struct interface *ifp)
{
	struct auth *ap, *res;
	int i;


	if (ifp == NULL)
		return 0;

	res = NULL;
	ap = ifp->int_auth;
	for (i = 0; i < MAX_AUTH_KEYS; i++, ap++) {
		/* stop looking after the last key */
		if (ap->type == RIP_AUTH_NONE)
			break;

		/* ignore keys that are not ready yet */
		if ((u_long)ap->start > (u_long)clk.tv_sec)
			continue;

		if ((u_long)ap->end < (u_long)clk.tv_sec) {
			/* note best expired password as a fall-back */
			if (res == NULL || (u_long)ap->end > (u_long)res->end)
				res = ap;
			continue;
		}

		/* note key with the best future */
		if (res == NULL || (u_long)res->end < (u_long)ap->end)
			res = ap;
	}
	return res;
}


void
clr_ws_buf(struct ws_buf *wb,
	   struct auth *ap)
{
	struct netauth *na;

	wb->lim = wb->base + NETS_LEN;
	wb->n = wb->base;
	memset(wb->n, 0, NETS_LEN*sizeof(*wb->n));

	/* (start to) install authentication if appropriate
	 */
	if (ap == NULL)
		return;

	na = (struct netauth*)wb->n;
	if (ap->type == RIP_AUTH_PW) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_PW;
		memcpy(na->au.au_pw, ap->key, sizeof(na->au.au_pw));
		wb->n++;

	} else if (ap->type ==  RIP_AUTH_MD5) {
		na->a_family = RIP_AF_AUTH;
		na->a_type = RIP_AUTH_MD5;
		na->au.a_md5.md5_keyid = ap->keyid;
		na->au.a_md5.md5_auth_len = RIP_AUTH_MD5_KEY_LEN;
		na->au.a_md5.md5_seqno = htonl(clk.tv_sec);
		wb->n++;
		wb->lim--;		/* make room for trailer */
	}
}


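/* Append the RIP-2 MD5 authentication trailer: one more entry with
 * family RIP_AF_AUTH and type 1 carrying the keyed MD5 digest of the
 * packet, as in the scheme of RFC 2082.
 */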
void
end_md5_auth(struct ws_buf *wb,
	     struct auth *ap)
{
	struct netauth *na, *na2;
	MD5_CTX md5_ctx;
	int len;


	na = (struct netauth*)wb->base;
	na2 = (struct netauth*)wb->n;
	len = (char *)na2-(char *)wb->buf;
	na2->a_family = RIP_AF_AUTH;
	na2->a_type = htons(1);
	na->au.a_md5.md5_pkt_len = htons(len);
	MD5Init(&md5_ctx);
	MD5Update(&md5_ctx, (u_char *)wb->buf, len + RIP_AUTH_MD5_HASH_XTRA);
	MD5Update(&md5_ctx, ap->key, RIP_AUTH_MD5_KEY_LEN);
	MD5Final(na2->au.au_pw, &md5_ctx);
	wb->n++;
}


/* Send the buffer
 */
static void
supply_write(struct ws_buf *wb)
{
	/* Output multicast only if legal.
	 * If we would multicast and it would be illegal, then discard the
	 * packet.
	 */
	switch (wb->type) {
	case NO_OUT_MULTICAST:
		trace_pkt("skip multicast to %s because impossible",
			  naddr_ntoa(ws.to.sin_addr.s_addr));
		break;
	case NO_OUT_RIPV2:
		break;
	default:
		if (ws.a != NULL && ws.a->type == RIP_AUTH_MD5)
			end_md5_auth(wb,ws.a);
		if (output(wb->type, &ws.to, ws.ifp, wb->buf,
			   ((char *)wb->n - (char*)wb->buf)) < 0
		    && ws.ifp != NULL)
			if_sick(ws.ifp);
		ws.npackets++;
		break;
	}

	clr_ws_buf(wb,ws.a);
}


/* put an entry into the packet
 */
static void
supply_out(struct ag_info *ag)
{
	int i;
	naddr mask, v1_mask, dst_h, ddst_h = 0;
	struct ws_buf *wb;


	/* Skip this route if doing a flash update and it and the routes
	 * it aggregates have not changed recently.
	 */
	if (ag->ag_seqno < update_seqno
	    && (ws.state & WS_ST_FLASH))
		return;

	dst_h = ag->ag_dst_h;
	mask = ag->ag_mask;
	v1_mask = ripv1_mask_host(htonl(dst_h),
				  (ws.state & WS_ST_TO_ON_NET) ? ws.ifp : 0);
	i = 0;

	/* If we are sending RIPv2 packets that cannot (or must not) be
	 * heard by RIPv1 listeners, do not worry about sub- or supernets.
	 * Subnets (from other networks) can only be sent via multicast.
	 * A pair of subnet routes might have been promoted so that they
	 * are legal to send by RIPv1.
	 * If RIPv1 is off, use the multicast buffer.
	 */
	if ((ws.state & WS_ST_RIP2_ALL)
	    || ((ag->ag_state & AGS_RIPV2) && v1_mask != mask)) {
		/* use the RIPv2-only buffer */
		wb = &v2buf;

	} else {
		/* use the RIPv1-or-RIPv2 buffer */
		wb = &v12buf;

		/* Convert supernet route into corresponding set of network
		 * routes for RIPv1, but leave non-contiguous netmasks
		 * to ag_check().
		 */
		if (v1_mask > mask
		    && mask + (mask & -mask) == 0) {
			ddst_h = v1_mask & -v1_mask;
			i = (v1_mask & ~mask)/ddst_h;
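			/* ddst_h is the spacing between the component
			 * network routes; i is how many additional
			 * routes are needed to cover the supernet.
			 */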

			if (i > ws.gen_limit) {
				/* Punt if we would have to generate an
				 * unreasonable number of routes.
				 */
				if (TRACECONTENTS)
					trace_misc("sending %s-->%s as 1"
						   " instead of %d routes",
						   addrname(htonl(dst_h), mask,
							1),
						   naddr_ntoa(ws.to.sin_addr
							.s_addr),
						   i+1);
				i = 0;

			} else {
				mask = v1_mask;
				ws.gen_limit -= i;
			}
		}
	}

	do {
		wb->n->n_family = RIP_AF_INET;
		wb->n->n_dst = htonl(dst_h);
		/* If the route is from router-discovery or we are
		 * shutting down, admit only a bad metric.
		 */
		wb->n->n_metric = ((stopint || ag->ag_metric < 1)
				   ? HOPCNT_INFINITY
				   : ag->ag_metric);
		wb->n->n_metric = htonl(wb->n->n_metric);
		/* Any non-zero bits in the supposedly unused RIPv1 fields
		 * cause the old `routed` to ignore the route.
		 * That means the mask and so forth cannot be sent
		 * in the hybrid RIPv1/RIPv2 mode.
		 */
		if (ws.state & WS_ST_RIP2_ALL) {
			if (ag->ag_nhop != 0
			    && ((ws.state & WS_ST_QUERY)
				|| (ag->ag_nhop != ws.ifp->int_addr
				    && on_net(ag->ag_nhop,
					      ws.ifp->int_net,
					      ws.ifp->int_mask))))
				wb->n->n_nhop = ag->ag_nhop;
			wb->n->n_mask = htonl(mask);
			wb->n->n_tag = ag->ag_tag;
		}
		dst_h += ddst_h;

		if (++wb->n >= wb->lim)
			supply_write(wb);
	} while (i-- != 0);
}


/* supply one route from the table
 */
/* ARGSUSED */
static int
walk_supply(struct radix_node *rn,
	    struct walkarg *argp UNUSED)
{
#define RT ((struct rt_entry *)rn)
	u_short ags;
	char metric, pref;
	naddr dst, nhop;
	struct rt_spare *rts;
	int i;


	/* Do not advertise external remote interfaces or passive interfaces.
	 */
	if ((RT->rt_state & RS_IF)
	    && RT->rt_ifp != 0
	    && (RT->rt_ifp->int_state & IS_PASSIVE)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	/* If being quiet about our ability to forward, then
	 * do not say anything unless responding to a query,
	 * except about our main interface.
	 */
	if (!supplier && !(ws.state & WS_ST_QUERY)
	    && !(RT->rt_state & RS_MHOME))
		return 0;

	dst = RT->rt_dst;

	/* do not collide with the fake default route */
	if (dst == RIP_DEFAULT
	    && (ws.state & WS_ST_DEFAULT))
		return 0;

	if (RT->rt_state & RS_NET_SYN) {
		if (RT->rt_state & RS_NET_INT) {
			/* Do not send manual synthetic network routes
			 * into the subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;

		} else {
			/* Do not send automatic synthetic network routes
			 * if they are not needed because no RIPv1 listeners
			 * can hear them.
			 */
			if (ws.state & WS_ST_RIP2_ALL)
				return 0;

			/* Do not send automatic synthetic network routes to
			 * the real subnet.
			 */
			if (on_net(ws.to.sin_addr.s_addr,
				   ntohl(dst), RT->rt_mask))
				return 0;
		}
		nhop = 0;

	} else {
		/* Advertise the next hop if this is not a route for one
		 * of our interfaces and the next hop is on the same
		 * network as the target.
		 * The final determination is made by supply_out().
		 */
		if (!(RT->rt_state & RS_IF)
		    && RT->rt_gate != myaddr
		    && RT->rt_gate != loopaddr)
			nhop = RT->rt_gate;
		else
			nhop = 0;
	}

	metric = RT->rt_metric;
	ags = 0;

	if (RT->rt_state & RS_MHOME) {
		/* retain host route of multi-homed servers */
		;

	} else if (RT_ISHOST(RT)) {
		/* We should always suppress (into existing network routes)
		 * the host routes for the local end of our point-to-point
		 * links.
		 * If we are suppressing host routes in general, then do so.
		 * Avoid advertising host routes onto their own network,
		 * where they should be handled by proxy-ARP.
		 */
		if ((RT->rt_state & RS_LOCAL)
		    || ridhosts
		    || on_net(dst, ws.to_net, ws.to_mask))
			ags |= AGS_SUPPRESS;

		/* Aggregate stray host routes into network routes if allowed.
		 * We cannot aggregate host routes into small network routes
		 * without confusing RIPv1 listeners into thinking the
		 * network routes are host routes.
		 */
		if ((ws.state & WS_ST_AG) && (ws.state & WS_ST_RIP2_ALL))
			ags |= AGS_AGGREGATE;

	} else {
		/* Always suppress network routes into other, existing
		 * network routes
		 */
		ags |= AGS_SUPPRESS;

		/* Generate supernets if allowed.
		 * If we can be heard by RIPv1 systems, we will
		 * later convert back to ordinary nets.
		 * This unifies dealing with received supernets.
		 */
		if ((ws.state & WS_ST_AG)
		    && ((RT->rt_state & RS_SUBNET)
			|| (ws.state & WS_ST_SUPER_AG)))
			ags |= AGS_AGGREGATE;
	}

	/* Do not send RIPv1 advertisements of subnets to other
	 * networks. If possible, multicast them by RIPv2.
	 */
	if ((RT->rt_state & RS_SUBNET)
	    && !(ws.state & WS_ST_RIP2_ALL)
	    && !on_net(dst, ws.to_std_net, ws.to_std_mask))
		ags |= AGS_RIPV2 | AGS_AGGREGATE;


	/* Do not send a route back to where it came from, except in
	 * response to a query.  This is "split-horizon".  That means not
	 * advertising back to the same network and so via the same interface.
	 *
	 * We want to suppress routes that might have been fragmented
	 * from this route by a RIPv1 router and sent back to us, and so we
	 * cannot forget this route here.  Let the split-horizon route
	 * suppress the fragmented routes and then itself be forgotten.
	 *
	 * Include the routes for both ends of point-to-point interfaces
	 * among those suppressed by split-horizon, since the other side
	 * should know them as well as we do.
	 *
	 * Notice spare routes with the same metric that we are about to
	 * advertise, to split the horizon on redundant, inactive paths.
	 *
	 * Do not suppress advertisements of interface-related addresses on
	 * non-point-to-point interfaces.  This ensures that we have something
	 * to say every 30 seconds to help detect broken Ethernets or
	 * other interfaces where one packet every 30 seconds costs nothing.
	 */
	if (ws.ifp != NULL
	    && !(ws.state & WS_ST_QUERY)
	    && (ws.state & WS_ST_TO_ON_NET)
	    && (!(RT->rt_state & RS_IF)
		|| ws.ifp->int_if_flags & IFF_POINTOPOINT)) {
		for (rts = RT->rt_spares, i = NUM_SPARES; i != 0; i--, rts++) {
			if (rts->rts_metric > metric
			    || rts->rts_ifp != ws.ifp)
				continue;

			/* If we do not mark the route with AGS_SPLIT_HZ here,
			 * it will be poisoned-reverse, or advertised back
			 * toward its source with an infinite metric.
			 * If we have recently advertised the route with a
			 * better metric than we now have, then we should
			 * poison-reverse the route before suppressing it for
			 * split-horizon.
			 *
			 * In almost all cases, if there is no spare for the
			 * route then it is either old and dead or a brand
			 * new route. If it is brand new, there is no need
			 * for poison-reverse. If it is old and dead, it
			 * is already poisoned.
			 */
			if (RT->rt_poison_time < now_expire
			    || RT->rt_poison_metric >= metric
			    || RT->rt_spares[1].rts_gate == 0) {
				ags |= AGS_SPLIT_HZ;
				ags &= ~AGS_SUPPRESS;
			}
			metric = HOPCNT_INFINITY;
			break;
		}
	}

	/* Keep track of the best metric with which the
	 * route has been advertised recently.
	 */
	if (RT->rt_poison_metric >= metric
	    || RT->rt_poison_time < now_expire) {
		RT->rt_poison_time = now.tv_sec;
		RT->rt_poison_metric = metric;
	}

	/* Adjust the outgoing metric by the cost of the link.
	 * Avoid aggregation when a route is counting to infinity.
	 */
	pref = RT->rt_poison_metric + ws.metric;
	metric += ws.metric;

	/* Do not advertise stable routes that will be ignored,
	 * unless we are answering a query.
	 * If the route recently was advertised with a metric that
	 * would have been less than infinity through this interface,
	 * we need to continue to advertise it in order to poison it.
	 */
	if (metric >= HOPCNT_INFINITY) {
		if (!(ws.state & WS_ST_QUERY)
		    && (pref >= HOPCNT_INFINITY
			|| RT->rt_poison_time < now_garbage))
			return 0;

		metric = HOPCNT_INFINITY;
	}

	ag_check(dst, RT->rt_mask, 0, nhop, metric, pref,
		 RT->rt_seqno, RT->rt_tag, ags, supply_out);
	return 0;
#undef RT
}


/* Supply dst with the contents of the routing tables.
 * If this won't fit in one packet, chop it up into several.
 */
void
supply(struct sockaddr_in *dst,
       struct interface *ifp,		/* output interface */
       enum output_type type,
       int flash,			/* 1=flash update */
       int vers,			/* RIP version */
       int passwd_ok)			/* OK to include cleartext password */
{
	struct rt_entry *rt;
	int def_metric;

	ws.state = 0;
	ws.gen_limit = 1024;

	ws.to = *dst;
	ws.to_std_mask = std_mask(ws.to.sin_addr.s_addr);
	ws.to_std_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_std_mask;

	if (ifp != NULL) {
		ws.to_mask = ifp->int_mask;
		ws.to_net = ifp->int_net;
		if (on_net(ws.to.sin_addr.s_addr, ws.to_net, ws.to_mask))
			ws.state |= WS_ST_TO_ON_NET;

	} else {
		ws.to_mask = ripv1_mask_net(ws.to.sin_addr.s_addr, 0);
		ws.to_net = ntohl(ws.to.sin_addr.s_addr) & ws.to_mask;
		rt = rtfind(dst->sin_addr.s_addr);
		if (rt)
			ifp = rt->rt_ifp;
	}

	ws.npackets = 0;
	if (flash)
		ws.state |= WS_ST_FLASH;

	if ((ws.ifp = ifp) == NULL) {
		ws.metric = 1;
	} else {
		/* Adjust the advertised metric by the outgoing interface
		 * metric.
		 */
		ws.metric = ifp->int_metric + 1 + ifp->int_adj_outmetric;
	}

	ripv12_buf.rip.rip_vers = vers;

	switch (type) {
	case OUT_MULTICAST:
		if (ifp != NULL && ifp->int_if_flags & IFF_MULTICAST)
			v2buf.type = OUT_MULTICAST;
		else
			v2buf.type = NO_OUT_MULTICAST;
		v12buf.type = OUT_BROADCAST;
		break;

	case OUT_QUERY:
		ws.state |= WS_ST_QUERY;
		/* FALLTHROUGH */
	case OUT_BROADCAST:
	case OUT_UNICAST:
		v2buf.type = (vers == RIPv2) ? type : NO_OUT_RIPV2;
		v12buf.type = type;
		break;

	case NO_OUT_MULTICAST:
	case NO_OUT_RIPV2:
		break;			/* no output */
	}

	if (vers == RIPv2) {
		/* full RIPv2 only if cannot be heard by RIPv1 listeners */
		if (type != OUT_BROADCAST)
			ws.state |= WS_ST_RIP2_ALL;
		if ((ws.state & WS_ST_QUERY)
		    || !(ws.state & WS_ST_TO_ON_NET)) {
			ws.state |= (WS_ST_AG | WS_ST_SUPER_AG);
		} else if (ifp == NULL || !(ifp->int_state & IS_NO_AG)) {
			ws.state |= WS_ST_AG;
			if (type != OUT_BROADCAST
			    && (ifp == NULL
				|| !(ifp->int_state & IS_NO_SUPER_AG)))
				ws.state |= WS_ST_SUPER_AG;
		}
	}

	ws.a = (vers == RIPv2) ? find_auth(ifp) : 0;
	if (!passwd_ok && ws.a != NULL && ws.a->type == RIP_AUTH_PW)
		ws.a = NULL;
	clr_ws_buf(&v12buf,ws.a);
	clr_ws_buf(&v2buf,ws.a);

	/*  Fake a default route if asked and if there is not already
	 * a better, real default route.
	 */
	if (supplier && ifp && (def_metric = ifp->int_d_metric) != 0) {
		if ((rt = rtget(RIP_DEFAULT, 0)) == NULL
		    || rt->rt_metric+ws.metric >= def_metric) {
			ws.state |= WS_ST_DEFAULT;
			ag_check(0, 0, 0, 0, def_metric, def_metric,
				 0, 0, 0, supply_out);
		} else {
			def_metric = rt->rt_metric+ws.metric;
		}

		/* If both RIPv2 and the poor-man's router discovery
		 * kludge are on, arrange to advertise an extra
		 * default route via RIPv1.
		 */
		if ((ws.state & WS_ST_RIP2_ALL)
		    && (ifp->int_state & IS_PM_RDISC)) {
			ripv12_buf.rip.rip_vers = RIPv1;
			v12buf.n->n_family = RIP_AF_INET;
			v12buf.n->n_dst = htonl(RIP_DEFAULT);
			v12buf.n->n_metric = htonl(def_metric);
			v12buf.n++;
		}
	}

	(void)rn_walktree(rhead, walk_supply, 0);
	ag_flush(0,0,supply_out);

	/* Flush the packet buffers, provided they are not empty and
	 * do not contain only the password.
	 */
	if (v12buf.n != v12buf.base
	    && (v12buf.n > v12buf.base+1
		|| v12buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v12buf);
	if (v2buf.n != v2buf.base
	    && (v2buf.n > v2buf.base+1
		|| v2buf.base->n_family != RIP_AF_AUTH))
		supply_write(&v2buf);

	/* If we sent nothing and this is an answer to a query, send
	 * an empty buffer.
	 */
	if (ws.npackets == 0
	    && (ws.state & WS_ST_QUERY))
		supply_write(&v12buf);
}


/* send all of the routing table or just do a flash update
 */
void
rip_bcast(int flash)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	enum output_type type;
	int vers;
	struct timeval rtime;


	need_flash = 0;
	intvl_random(&rtime, MIN_WAITTIME, MAX_WAITTIME);
	no_flash = rtime;
	timevaladd(&no_flash, &now);

	if (rip_sock < 0)
		return;

	trace_act("send %s and inhibit dynamic updates for %.3f sec",
		  flash ? "dynamic update" : "all routes",
		  rtime.tv_sec + ((float)rtime.tv_usec)/1000000.0);

	LIST_FOREACH(ifp, &ifnet, int_list) {
		/* Skip interfaces not doing RIP.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_OUT_OFF(ifp->int_state))
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

		vers = (ifp->int_state & IS_NO_RIPV1_OUT) ? RIPv2 : RIPv1;

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			if (vers == RIPv2
			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			if (vers == RIPv2 &&
			    ifp->int_if_flags & IFF_MULTICAST &&
			    !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_UNICAST;
			}

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		supply(&dst, ifp, type, flash, vers, 1);
	}

	update_seqno++;			/* all routes are up to date */
}


/* Ask for routes
 * Do it only once to an interface, and not even after the interface
 * was broken and recovered.
 */
void
rip_query(void)
{
#ifdef _HAVE_SIN_LEN
	static struct sockaddr_in dst = {sizeof(dst), AF_INET, 0, {0}, {0}};
#else
	static struct sockaddr_in dst = {AF_INET};
#endif
	struct interface *ifp;
	struct rip buf;
	enum output_type type;


	if (rip_sock < 0)
		return;

	memset(&buf, 0, sizeof(buf));

	LIST_FOREACH(ifp, &ifnet, int_list) {
		/* Skip interfaces already queried.
		 * Do not ask via interfaces through which we don't
		 * accept input.  Do not ask via interfaces that cannot
		 * send RIP packets.
		 * Do try broken interfaces to see if they have healed.
		 */
		if (IS_RIP_IN_OFF(ifp->int_state)
		    || ifp->int_query_time != NEVER)
			continue;

		/* skip turned off interfaces */
		if (!iff_up(ifp->int_if_flags))
			continue;

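		/* A request with a single entry of address family zero
		 * and an infinite metric asks the receiver for its
		 * entire routing table (RFC 2453).
		 */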
		buf.rip_vers = (ifp->int_state&IS_NO_RIPV1_OUT) ? RIPv2:RIPv1;
		buf.rip_cmd = RIPCMD_REQUEST;
		buf.rip_nets[0].n_family = RIP_AF_UNSPEC;
		buf.rip_nets[0].n_metric = htonl(HOPCNT_INFINITY);

		/* Send a RIPv1 query only if allowed and if we will
		 * listen to RIPv1 routers.
		 */
		if ((ifp->int_state & IS_NO_RIPV1_OUT)
		    || (ifp->int_state & IS_NO_RIPV1_IN)) {
			buf.rip_vers = RIPv2;
		} else {
			buf.rip_vers = RIPv1;
		}

		if (ifp->int_if_flags & IFF_BROADCAST) {
			/* ordinary, hardware interface */
			dst.sin_addr.s_addr = ifp->int_brdaddr;

			/* Broadcast RIPv1 queries and RIPv2 queries
			 * when the hardware cannot multicast.
			 */
			if (buf.rip_vers == RIPv2
			    && (ifp->int_if_flags & IFF_MULTICAST)
			    && !(ifp->int_state  & IS_NO_RIP_MCAST)) {
				type = OUT_MULTICAST;
			} else {
				type = OUT_BROADCAST;
			}

		} else if (ifp->int_if_flags & IFF_POINTOPOINT) {
			/* point-to-point hardware interface */
			dst.sin_addr.s_addr = ifp->int_dstaddr;
			type = OUT_UNICAST;

		} else if (ifp->int_state & IS_REMOTE) {
			/* remote interface */
			dst.sin_addr.s_addr = ifp->int_addr;
			type = OUT_UNICAST;

		} else {
			/* ATM, HIPPI, etc. */
			continue;
		}

		ifp->int_query_time = now.tv_sec+SUPPLY_INTERVAL;
		if (output(type, &dst, ifp, &buf, sizeof(buf)) < 0)
			if_sick(ifp);
	}
}
