/*	$OpenBSD: rde.c,v 1.91 2023/03/08 04:43:14 guenther Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

#define MINIMUM(a, b)	(((a) < (b)) ? (a) : (b))

void		 rde_sig_handler(int sig, short, void *);
__dead void	 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct iface	*rde_asext_lookup(struct in6_addr, int);
void		 rde_asext_get(struct kroute *);
void		 rde_asext_put(struct kroute *);

int		 comp_asext(struct lsa *, struct lsa *);
struct lsa	*orig_asext_lsa(struct kroute *, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
		    struct vertex *);
struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
void		 append_prefix_lsa(struct lsa **, u_int16_t *,
		    struct lsa_prefix *);

/* A 32-bit value != any ifindex.
 * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
#define	LS_ID_INTRA_RTR	0x01000000
/* Tree of prefixes with global scope on a given link,
 * see orig_intra_lsa_*() */
struct prefix_node {
	RB_ENTRY(prefix_node)	 entry;
	struct lsa_prefix	*prefix;
};
RB_HEAD(prefix_tree, prefix_node);
RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
int		 prefix_compare(struct prefix_node *, struct prefix_node *);
void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
static struct imsgev	*iev_ospfe;
static struct imsgev	*iev_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	/*
	 * XXX needed with fork+exec
	 * log_init(debug, LOG_DAEMON);
	 * log_setverbose(verbose);
	 */

	ospfd_process = PROC_RDE_ENGINE;
	log_procinit(log_procnames[ospfd_process]);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	conf_clear_redist_list(&rdeconf->redist_list);

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

__dead void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	/* close pipes */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	close(iev_ospfe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}

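/*
 * Handle imsgs from the ospfe process: neighbor state transitions,
 * database exchange (DD, LS request/update/ack) and ctl show requests.
 */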
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, shut = 0, verbose;
	u_int16_t		 l;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (state != nbr->state &&
			    (nbr->state & NBR_STA_FULL ||
			    state & NBR_STA_FULL)) {
				nbr->state = state;
				area_track(nbr->area);
				orig_intra_area_prefix_lsas(nbr->area);
			}

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
			    0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			imsg_compose_event(iev_ospfe, IMSG_DD_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    req_hdr.type, req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table we should reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_LINK:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_INTRA:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_IFINFO:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(int))
				fatalx("IFINFO imsg with wrong len");

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				fatalx("IFINFO imsg with bad peerid");
			memcpy(&nbr->iface->state, imsg.data, sizeof(int));

			/* Resend LSAs if interface state changes. */
			orig_intra_area_prefix_lsas(nbr->area);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

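/*
 * Handle imsgs from the parent process: redistributed networks,
 * interface and address changes and config reloads.
 */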
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct area		*area;
	struct iface		*iface, *ifp, *i;
	struct ifaddrchange	*ifc;
	struct iface_addr	*ia, *nia;
	struct imsg		 imsg;
	struct kroute		 kr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	ssize_t			 n;
	int			 shut = 0, link_ok, prev_link_ok, orig_lsa;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));
			rde_asext_get(&kr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));
			rde_asext_put(&kr);
			break;
		case IMSG_IFINFO:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct iface))
				fatalx("IFINFO imsg with wrong len");

			ifp = imsg.data;

			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				orig_lsa = 0;
				LIST_FOREACH(i, &area->iface_list, entry) {
					if (strcmp(i->dependon,
					    ifp->name) == 0) {
						i->depend_ok =
						    ifstate_is_up(ifp);
						if (ifstate_is_up(i))
							orig_lsa = 1;
					}
				}
				if (orig_lsa)
					orig_intra_area_prefix_lsas(area);
			}

			if (!(ifp->cflags & F_IFACE_CONFIGURED))
				break;
			iface = if_find(ifp->ifindex);
			if (iface == NULL)
				fatalx("interface lost in rde");

			prev_link_ok = (iface->flags & IFF_UP) &&
			    LINK_STATE_IS_UP(iface->linkstate);

			if_update(iface, ifp->mtu, ifp->flags, ifp->if_type,
			    ifp->linkstate, ifp->baudrate, ifp->rdomain);

			/* Resend LSAs if interface state changes. */
			link_ok = (iface->flags & IFF_UP) &&
			    LINK_STATE_IS_UP(iface->linkstate);
			if (prev_link_ok == link_ok)
				break;

			orig_intra_area_prefix_lsas(iface->area);

			break;
		case IMSG_IFADDRNEW:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ifaddrchange))
				fatalx("IFADDRNEW imsg with wrong len");
			ifc = imsg.data;

			iface = if_find(ifc->ifindex);
			if (iface == NULL)
				fatalx("IFADDRNEW interface lost in rde");

			if ((ia = calloc(1, sizeof(struct iface_addr))) ==
			    NULL)
				fatal("rde_dispatch_parent IFADDRNEW");
			ia->addr = ifc->addr;
			ia->dstbrd = ifc->dstbrd;
			ia->prefixlen = ifc->prefixlen;

			TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
			if (iface->area)
				orig_intra_area_prefix_lsas(iface->area);
			break;
		case IMSG_IFADDRDEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct ifaddrchange))
				fatalx("IFADDRDEL imsg with wrong len");
			ifc = imsg.data;

			iface = if_find(ifc->ifindex);
			if (iface == NULL)
				fatalx("IFADDRDEL interface lost in rde");

			for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL;
			    ia = nia) {
				nia = TAILQ_NEXT(ia, entry);

				if (IN6_ARE_ADDR_EQUAL(&ia->addr,
				    &ifc->addr)) {
					TAILQ_REMOVE(&iface->ifa_list, ia,
					    entry);
					free(ia);
					break;
				}
			}
			if (iface->area)
				orig_intra_area_prefix_lsas(iface->area);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

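/*
 * Dump an area for a ctl request: the area itself, each interface with
 * its link scope LSAs and finally the area scope LSAs.
 */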
void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

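/*
 * Collect all valid, non-connected nexthops of a route into a single
 * IMSG_KROUTE_CHANGE message for the parent; if none remain the route
 * is removed from the kernel instead.
 */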
void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix = r->prefix;
		kr.nexthop = rn->nexthop;
		if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
		    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
			kr.scope = rn->ifindex;
		kr.ifindex = rn->ifindex;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}

	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}

void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix = r->prefix;
	kr.prefixlen = r->prefixlen;

	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
	    &kr, sizeof(kr));
}

void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree)
		sumctl.num_ext_lsa++;

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree)
		sumareactl.num_lsa++;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

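/*
 * Set up the neighbor hash table, keyed by peerid (size rounded up to a
 * power of two), and insert the pseudo-neighbor representing this router.
 */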
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	if ((iface = if_find(new->ifindex)) == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

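/*
 * Return 1 if any neighbor in the given area (or in any area when area
 * is NULL) is still in Exchange or Loading state.
 */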
int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr		*nbr;
	int			 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr		*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this should never happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
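/* Return the interface with a configured prefix covering the given
 * prefix (and matching prefixlen unless plen is -1), or NULL. */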
struct iface *
rde_asext_lookup(struct in6_addr prefix, int plen)
{
	struct area		*area;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct in6_addr		 ina, inb;

	LIST_FOREACH(area, &rdeconf->area_list, entry) {
		LIST_FOREACH(iface, &area->iface_list, entry) {
			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
					continue;

				inet6applymask(&ina, &ia->addr, ia->prefixlen);
				inet6applymask(&inb, &prefix, ia->prefixlen);
				if (IN6_ARE_ADDR_EQUAL(&ina, &inb) &&
				    (plen == -1 || plen == ia->prefixlen))
					return (iface);
			}
		}
	}
	return (NULL);
}

void
rde_asext_get(struct kroute *kr)
{
	struct vertex	*v;
	struct lsa	*lsa;

	if (rde_asext_lookup(kr->prefix, kr->prefixlen)) {
		/* already announced as (stub) net LSA */
		log_debug("rde_asext_get: %s/%d is net LSA",
		    log_in6addr(&kr->prefix), kr->prefixlen);
		return;
	}

	/* update of seqnum is done by lsa_merge */
	if ((lsa = orig_asext_lsa(kr, DEFAULT_AGE))) {
		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
		    lsa->hdr.adv_rtr);
		lsa_merge(nbrself, lsa, v);
	}
}

void
rde_asext_put(struct kroute *kr)
{
	struct vertex	*v;
	struct lsa	*lsa;

	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA lsa_find() will fail later and nothing will happen.
	 */

	/* remove by reflooding with MAX_AGE */
	if ((lsa = orig_asext_lsa(kr, MAX_AGE))) {
		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
		    lsa->hdr.adv_rtr);

		/*
		 * if v == NULL no LSA is in the table and
		 * nothing has to be done.
		 */
		if (v)
			lsa_merge(nbrself, lsa, v);
		else
			free(lsa);
	}
}

/*
 * summary LSA stuff
 */
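/*
 * Decide whether a route would need an inter-area (summary) LSA in the
 * given area; actual origination is still unimplemented (see the #if 0
 * block below).
 */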
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct vertex		*v = NULL;
#if 0 /* XXX */
	struct lsa		*lsa;
	u_int16_t		 type = 0;
#endif

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* TODO nexthop check, nexthop part of area -> no summary */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

#if 0 /* XXX a lot todo */
	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_INTER_A_PREFIX;
	} else if (rte->d_type == DT_RTR) {
		type = LSA_TYPE_INTER_A_ROUTER;
	} else

	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
#endif

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}

/*
 * Functions for self-originated LSAs
 */

/* Prefix LSAs have variable size. We have to be careful to copy the right
 * amount of bytes, and to realloc() the right amount of memory. */
void
append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
{
	struct lsa_prefix	*copy;
	unsigned int		 lsa_prefix_len;
	unsigned int		 new_len;
	char			*new_lsa;

	lsa_prefix_len = sizeof(struct lsa_prefix)
	    + LSA_PREFIXSIZE(prefix->prefixlen);

	new_len = *len + lsa_prefix_len;

	/* Make sure we have enough space for this prefix. */
	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
		fatalx("append_prefix_lsa");

	/* Append prefix to LSA. */
	copy = (struct lsa_prefix *)(new_lsa + *len);
	memcpy(copy, prefix, lsa_prefix_len);

	*lsa = (struct lsa *)new_lsa;
	*len = new_len;
}

int
prefix_compare(struct prefix_node *a, struct prefix_node *b)
{
	struct lsa_prefix	*p;
	struct lsa_prefix	*q;
	int			 i;
	int			 len;

	p = a->prefix;
	q = b->prefix;

	len = MINIMUM(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));

	i = memcmp(p + 1, q + 1, len);
	if (i)
		return (i);
	if (p->prefixlen < q->prefixlen)
		return (-1);
	if (p->prefixlen > q->prefixlen)
		return (1);
	return (0);
}

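/*
 * Add the usable global prefixes of a Link LSA to the tree; link-local
 * and NU/LA prefixes are skipped, duplicates only merge their option bits.
 */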
void
prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
{
	struct prefix_node	*old;
	struct prefix_node	*new;
	struct in6_addr		 addr;
	unsigned int		 len;
	unsigned int		 i;
	char			*cur_prefix;

	cur_prefix = (char *)(lsa + 1);

	for (i = 0; i < ntohl(lsa->numprefix); i++) {
		if ((new = calloc(1, sizeof(*new))) == NULL)
			fatal("prefix_tree_add");
		new->prefix = (struct lsa_prefix *)cur_prefix;

		len = sizeof(*new->prefix)
		    + LSA_PREFIXSIZE(new->prefix->prefixlen);

		bzero(&addr, sizeof(addr));
		memcpy(&addr, new->prefix + 1,
		    LSA_PREFIXSIZE(new->prefix->prefixlen));

		new->prefix->metric = 0;

		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
			old = RB_INSERT(prefix_tree, tree, new);
			if (old != NULL) {
				old->prefix->options |= new->prefix->options;
				free(new);
			}
		} else
			free(new);

		cur_prefix = cur_prefix + len;
	}
}

RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)

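/*
 * Build the Intra-Area-Prefix LSA referencing the Network LSA for a
 * broadcast/NBMA link where this router is DR. Returns NULL unless a
 * new LSA is needed or an old copy has to be flushed.
 */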
struct lsa *
orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
{
	struct lsa		*lsa;
	struct vertex		*v;
	struct rde_nbr		*nbr;
	struct prefix_node	*node;
	struct prefix_tree	 tree;
	int			 num_full_nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	log_debug("orig_intra_lsa_net: area %s, interface %s",
	    inet_ntoa(area->id), iface->name);

	RB_INIT(&tree);

	if (iface->state & IF_STA_DR) {
		num_full_nbr = 0;
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self ||
			    nbr->iface->ifindex != iface->ifindex ||
			    (nbr->state & NBR_STA_FULL) == 0)
				continue;
			num_full_nbr++;
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(nbr->iface_id), nbr->id.s_addr);
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
		if (num_full_nbr == 0) {
			/* There are no adjacent neighbors on link.
			 * If a copy of this LSA already exists in DB,
			 * it needs to be flushed. orig_intra_lsa_rtr()
			 * will take care of prefixes configured on
			 * this interface. */
			if (!old)
				return NULL;
		} else {
			/* Add our own prefixes configured for this link. */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(iface->ifindex), rde_router_id());
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
	/* Continue only if a copy of this LSA already exists in DB.
	 * It needs to be flushed. */
	} else if (!old)
		return NULL;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_net");

	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	numprefix = 0;
	RB_FOREACH(node, prefix_tree, &tree) {
		append_prefix_lsa(&lsa, &len, node->prefix);
		numprefix++;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	while (!RB_EMPTY(&tree))
		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(iface->ifindex);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}

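/*
 * Build the router scoped Intra-Area-Prefix LSA covering the prefixes of
 * all interfaces in the area that are not already advertised via a
 * Network LSA (see orig_intra_lsa_net() above).
 */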
struct lsa *
orig_intra_lsa_rtr(struct area *area, struct vertex *old)
{
	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
				    + sizeof(struct in6_addr)];
	struct lsa		*lsa;
	struct lsa_prefix	*lsa_prefix;
	struct in6_addr		*prefix;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct rde_nbr		*nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_rtr");

	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
	lsa->data.pref_intra.ref_ls_id = 0;
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	numprefix = 0;
	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (!((iface->flags & IFF_UP) &&
		    LINK_STATE_IS_UP(iface->linkstate)) &&
		    !(iface->if_type == IFT_CARP))
			/* interface or link state down
			 * and not a carp interface */
			continue;

		if (iface->if_type == IFT_CARP &&
		    (iface->linkstate == LINK_STATE_UNKNOWN ||
		    iface->linkstate == LINK_STATE_INVALID))
			/* carp interface in state invalid or unknown */
			continue;

		if ((iface->state & IF_STA_DOWN) &&
		    !(iface->cflags & F_IFACE_PASSIVE))
			/* passive interfaces stay in state DOWN */
			continue;

		/* Broadcast links with adjacencies are handled
		 * by orig_intra_lsa_net(), ignore. */
		if (iface->type == IF_TYPE_BROADCAST ||
		    iface->type == IF_TYPE_NBMA) {
			if (iface->state & IF_STA_WAITING)
				/* Skip, we're still waiting for
				 * adjacencies to form. */
				continue;

			LIST_FOREACH(nbr, &area->nbr_list, entry)
				if (!nbr->self &&
				    nbr->iface->ifindex == iface->ifindex &&
				    nbr->state & NBR_STA_FULL)
					break;
			if (nbr)
				continue;
		}

		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;

		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
				continue;

			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));

			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
			    iface->state & IF_STA_LOOPBACK) {
				lsa_prefix->prefixlen = 128;
				lsa_prefix->metric = 0;
			} else if ((iface->if_type == IFT_CARP &&
				   iface->linkstate == LINK_STATE_DOWN) ||
				   !(iface->depend_ok)) {
				/* carp interfaces in state backup are
				 * announced with high metric for faster
				 * failover. */
				lsa_prefix->prefixlen = ia->prefixlen;
				lsa_prefix->metric = MAX_METRIC;
			} else {
				lsa_prefix->prefixlen = ia->prefixlen;
				lsa_prefix->metric = htons(iface->metric);
			}

			if (lsa_prefix->prefixlen == 128)
				lsa_prefix->options |= OSPF_PREFIX_LA;

			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
			    "%s/%d, metric %d", inet_ntoa(area->id),
			    iface->name, log_in6addr(&ia->addr),
			    lsa_prefix->prefixlen, ntohs(lsa_prefix->metric));

			prefix = (struct in6_addr *)(lsa_prefix + 1);
			inet6applymask(prefix, &ia->addr,
			    lsa_prefix->prefixlen);
			append_prefix_lsa(&lsa, &len, lsa_prefix);
			numprefix++;
		}

		/* TODO: Add prefixes of directly attached hosts, too */
		/* TODO: Add prefixes for virtual links */
	}

	/* If no prefixes were included, continue only if a copy of this
	 * LSA already exists in DB. It needs to be flushed. */
	if (numprefix == 0 && !old) {
		free(lsa);
		return NULL;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}

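/*
 * (Re)originate all Intra-Area-Prefix LSAs of an area: one per
 * broadcast/NBMA interface plus the router scoped one.
 */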
void
orig_intra_area_prefix_lsas(struct area *area)
{
	struct lsa	*lsa;
	struct vertex	*old;
	struct iface	*iface;

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->type == IF_TYPE_BROADCAST ||
		    iface->type == IF_TYPE_NBMA) {
			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
			    htonl(iface->ifindex), rde_router_id());
			lsa = orig_intra_lsa_net(area, iface, old);
			if (lsa)
				lsa_merge(rde_nbr_self(area), lsa, old);
		}
	}

	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
		htonl(LS_ID_INTRA_RTR), rde_router_id());
	lsa = orig_intra_lsa_rtr(area, old);
	if (lsa)
		lsa_merge(rde_nbr_self(area), lsa, old);
}

int
comp_asext(struct lsa *a, struct lsa *b)
{
	/* compare prefixes, if they are equal or not */
	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
		return (-1);
	return (memcmp(
	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
}

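/*
 * Build an AS-External LSA for a redistributed kroute; with age MAX_AGE
 * the LSA is used to flush a previous announcement.
 */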
struct lsa *
orig_asext_lsa(struct kroute *kr, u_int16_t age)
{
	struct lsa	*lsa;
	u_int32_t	 ext_tag;
	u_int16_t	 len, ext_off;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
	    LSA_PREFIXSIZE(kr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we should announce the true nexthop
	 * unless that nexthop is outside of the ospf cloud.
	 * XXX for now we don't do this.
	 */

	ext_off = len;
	if (kr->ext_tag) {
		len += sizeof(ext_tag);
	}
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    log_in6addr(&kr->prefix), kr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	lsa->data.asext.prefix.prefixlen = kr->prefixlen;
	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
	    &kr->prefix, LSA_PREFIXSIZE(kr->prefixlen));

	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, comp_asext, lsa);

	if (age == MAX_AGE) {
		/* inherit metric and ext_tag from the current LSA,
		 * some routers don't like to get withdraws that are
		 * different from what they have in their table.
		 */
		struct vertex *v;
		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
		    lsa->hdr.adv_rtr);
		if (v != NULL) {
			kr->metric = ntohl(v->lsa->data.asext.metric);
			if (kr->metric & LSA_ASEXT_T_FLAG) {
				memcpy(&ext_tag, (char *)v->lsa + ext_off,
				    sizeof(ext_tag));
				kr->ext_tag = ntohl(ext_tag);
			}
			kr->metric &= LSA_METRIC_MASK;
		}
	}

	if (kr->ext_tag) {
		lsa->data.asext.metric = htonl(kr->metric | LSA_ASEXT_T_FLAG);
		ext_tag = htonl(kr->ext_tag);
		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
	} else {
		lsa->data.asext.metric = htonl(kr->metric);
	}

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}

struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	return NULL;
}