/* rde.c revision 1.40 */
1/*	$OpenBSD: rde.c,v 1.40 2010/06/01 11:29:29 bluhm Exp $ */
2
3/*
4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#include <sys/types.h>
22#include <sys/socket.h>
23#include <sys/queue.h>
24#include <sys/param.h>
25#include <netinet/in.h>
26#include <arpa/inet.h>
27#include <err.h>
28#include <errno.h>
29#include <stdlib.h>
30#include <signal.h>
31#include <string.h>
32#include <pwd.h>
33#include <unistd.h>
34#include <event.h>
35
36#include "ospf6.h"
37#include "ospf6d.h"
38#include "ospfe.h"
39#include "log.h"
40#include "rde.h"
41
/* imsg dispatch and shutdown handling */
void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

/* "show summary" control replies and neighbor bookkeeping */
void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

/* per-neighbor list of LSAs we have requested but not yet received */
void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

/* AS-external LSA origination and premature-aging removal */
struct lsa	*rde_asext_get(struct rroute *);
struct lsa	*rde_asext_put(struct rroute *);

/* helpers for self-originated LSAs */
int		 comp_asext(struct lsa *, struct lsa *);
struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
struct lsa	*orig_intra_lsa_net(struct iface *, struct vertex *);
struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
void		 orig_intra_area_prefix_lsas(struct area *);
void		 append_prefix_lsa(struct lsa **, u_int16_t *,
		    struct lsa_prefix *);
int		 link_lsa_from_full_nbr(struct lsa *, struct iface *);

/* A 32-bit value != any ifindex.
 * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
#define	LS_ID_INTRA_RTR	0x01000000

/* Tree of prefixes with global scope on given a link,
 * see orig_intra_lsa_*() */
struct prefix_node {
	RB_ENTRY(prefix_node)	 entry;
	struct lsa_prefix	*prefix;	/* points into an LSA body, not owned */
};
RB_HEAD(prefix_tree, prefix_node);
RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
int		 prefix_compare(struct prefix_node *, struct prefix_node *);
void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;	/* running / staged config */
struct imsgev		*iev_ospfe;	/* imsg channel to the ospfe process */
struct imsgev		*iev_main;	/* imsg channel to the parent process */
struct rde_nbr		*nbrself;	/* pseudo neighbor for self-originated LSAs */
struct lsa_tree		 asext_tree;	/* AS scoped (as-external) LSA database */
/* ARGSUSED */
/*
 * Signal handler for SIGINT/SIGTERM: shut the RDE down cleanly.
 * Any other signal reaching this handler is a programming error.
 */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}
	fatalx("unexpected signal");
}
111
/* route decision engine */
/*
 * Fork the RDE child process and run its event loop.  The parent gets
 * the child's pid back; the child never returns (it _exit()s via
 * rde_shutdown()).  The pipe arrays are the socketpairs connecting the
 * three ospf6d processes; each process keeps only its own ends.
 */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	/* chroot into the unprivileged user's home and drop privileges */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends that belong to the other processes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* the redistribute list is only needed in the parent process */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
207
/*
 * Release all RDE state (SPF timer, candidate list, routing table,
 * areas, neighbors, imsg buffers) and terminate the process.
 * Never returns.
 */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	/* drop any queued but unsent imsgs before freeing the buffers */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	free(iev_ospfe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
232
233int
234rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
235    u_int16_t datalen)
236{
237	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
238	    data, datalen));
239}
240
241/* ARGSUSED */
242void
243rde_dispatch_imsg(int fd, short event, void *bula)
244{
245	struct imsgev		*iev = bula;
246	struct imsgbuf		*ibuf = &iev->ibuf;
247	struct imsg		 imsg;
248	struct in_addr		 aid;
249	struct ls_req_hdr	 req_hdr;
250	struct lsa_hdr		 lsa_hdr, *db_hdr;
251	struct rde_nbr		 rn, *nbr;
252	struct timespec		 tp;
253	struct lsa		*lsa;
254	struct area		*area;
255	struct vertex		*v;
256	struct iface		*iface, *ifp;
257	char			*buf;
258	ssize_t			 n;
259	time_t			 now;
260	int			 r, state, self, shut = 0, verbose;
261	u_int16_t		 l;
262
263	if (event & EV_READ) {
264		if ((n = imsg_read(ibuf)) == -1)
265			fatal("imsg_read error");
266		if (n == 0)	/* connection closed */
267			shut = 1;
268	}
269	if (event & EV_WRITE) {
270		if (msgbuf_write(&ibuf->w) == -1)
271			fatal("msgbuf_write");
272	}
273
274	clock_gettime(CLOCK_MONOTONIC, &tp);
275	now = tp.tv_sec;
276
277	for (;;) {
278		if ((n = imsg_get(ibuf, &imsg)) == -1)
279			fatal("rde_dispatch_imsg: imsg_read error");
280		if (n == 0)
281			break;
282
283		switch (imsg.hdr.type) {
284		case IMSG_NEIGHBOR_UP:
285			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
286				fatalx("invalid size of OE request");
287			memcpy(&rn, imsg.data, sizeof(rn));
288
289			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
290				fatalx("rde_dispatch_imsg: "
291				    "neighbor already exists");
292			break;
293		case IMSG_NEIGHBOR_DOWN:
294			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
295			break;
296		case IMSG_NEIGHBOR_CHANGE:
297			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
298				fatalx("invalid size of OE request");
299			memcpy(&state, imsg.data, sizeof(state));
300
301			nbr = rde_nbr_find(imsg.hdr.peerid);
302			if (nbr == NULL)
303				break;
304
305			if (state != nbr->state &&
306			    (nbr->state & NBR_STA_FULL ||
307			    state & NBR_STA_FULL)) {
308				nbr->state = state;
309				area_track(nbr->area, state);
310				orig_intra_area_prefix_lsas(nbr->area);
311			}
312
313			nbr->state = state;
314			if (nbr->state & NBR_STA_FULL)
315				rde_req_list_free(nbr);
316			break;
317		case IMSG_DB_SNAPSHOT:
318			nbr = rde_nbr_find(imsg.hdr.peerid);
319			if (nbr == NULL)
320				break;
321
322			lsa_snap(nbr, imsg.hdr.peerid);
323
324			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
325			    0, -1, NULL, 0);
326			break;
327		case IMSG_DD:
328			nbr = rde_nbr_find(imsg.hdr.peerid);
329			if (nbr == NULL)
330				break;
331
332			buf = imsg.data;
333			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
334			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
335				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
336				buf += sizeof(lsa_hdr);
337
338				v = lsa_find(nbr->iface, lsa_hdr.type,
339				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
340				if (v == NULL)
341					db_hdr = NULL;
342				else
343					db_hdr = &v->lsa->hdr;
344
345				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
346					/*
347					 * only request LSAs that are
348					 * newer or missing
349					 */
350					rde_req_list_add(nbr, &lsa_hdr);
351					imsg_compose_event(iev_ospfe, IMSG_DD,
352					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
353					    sizeof(lsa_hdr));
354				}
355			}
356			if (l != 0)
357				log_warnx("rde_dispatch_imsg: peerid %lu, "
358				    "trailing garbage in Database Description "
359				    "packet", imsg.hdr.peerid);
360
361			imsg_compose_event(iev_ospfe, IMSG_DD_END,
362			    imsg.hdr.peerid, 0, -1, NULL, 0);
363			break;
364		case IMSG_LS_REQ:
365			nbr = rde_nbr_find(imsg.hdr.peerid);
366			if (nbr == NULL)
367				break;
368
369			buf = imsg.data;
370			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
371			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
372				memcpy(&req_hdr, buf, sizeof(req_hdr));
373				buf += sizeof(req_hdr);
374
375				if ((v = lsa_find(nbr->iface,
376				    req_hdr.type, req_hdr.ls_id,
377				    req_hdr.adv_rtr)) == NULL) {
378					imsg_compose_event(iev_ospfe,
379					    IMSG_LS_BADREQ,
380					    imsg.hdr.peerid, 0, -1, NULL, 0);
381					continue;
382				}
383				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
384				    imsg.hdr.peerid, 0, -1, v->lsa,
385				    ntohs(v->lsa->hdr.len));
386			}
387			if (l != 0)
388				log_warnx("rde_dispatch_imsg: peerid %lu, "
389				    "trailing garbage in LS Request "
390				    "packet", imsg.hdr.peerid);
391			break;
392		case IMSG_LS_UPD:
393			nbr = rde_nbr_find(imsg.hdr.peerid);
394			if (nbr == NULL)
395				break;
396
397			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
398			if (lsa == NULL)
399				fatal(NULL);
400			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
401
402			if (!lsa_check(nbr, lsa,
403			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
404				free(lsa);
405				break;
406			}
407
408			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
409				    lsa->hdr.adv_rtr);
410			if (v == NULL)
411				db_hdr = NULL;
412			else
413				db_hdr = &v->lsa->hdr;
414
415			if (nbr->self) {
416				lsa_merge(nbr, lsa, v);
417				/* lsa_merge frees the right lsa */
418				break;
419			}
420
421			r = lsa_newer(&lsa->hdr, db_hdr);
422			if (r > 0) {
423				/* new LSA newer than DB */
424				if (v && v->flooded &&
425				    v->changed + MIN_LS_ARRIVAL >= now) {
426					free(lsa);
427					break;
428				}
429
430				rde_req_list_del(nbr, &lsa->hdr);
431
432				self = lsa_self(lsa);
433				if (self) {
434					if (v == NULL)
435						/* LSA is no longer announced,
436						 * remove by premature aging. */
437						lsa_flush(nbr, lsa);
438					else
439						lsa_reflood(v, lsa);
440				} else if (lsa_add(nbr, lsa))
441					/* delayed lsa, don't flood yet */
442					break;
443
444				/* flood and perhaps ack LSA */
445				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
446				    imsg.hdr.peerid, 0, -1, lsa,
447				    ntohs(lsa->hdr.len));
448
449				/* reflood self originated LSA */
450				if (self && v)
451					imsg_compose_event(iev_ospfe,
452					    IMSG_LS_FLOOD,
453					    v->peerid, 0, -1, v->lsa,
454					    ntohs(v->lsa->hdr.len));
455				/* new LSA was not added so free it */
456				if (self)
457					free(lsa);
458			} else if (r < 0) {
459				/* lsa no longer needed */
460				free(lsa);
461
462				/*
463				 * point 6 of "The Flooding Procedure"
464				 * We are violating the RFC here because
465				 * it does not make sense to reset a session
466				 * because an equal LSA is already in the table.
467				 * Only if the LSA sent is older than the one
468				 * in the table we should reset the session.
469				 */
470				if (rde_req_list_exists(nbr, &lsa->hdr)) {
471					imsg_compose_event(iev_ospfe,
472					    IMSG_LS_BADREQ,
473					    imsg.hdr.peerid, 0, -1, NULL, 0);
474					break;
475				}
476
477				/* new LSA older than DB */
478				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
479				    ntohs(db_hdr->age) == MAX_AGE)
480					/* seq-num wrap */
481					break;
482
483				if (v->changed + MIN_LS_ARRIVAL >= now)
484					break;
485
486				/* directly send current LSA, no ack */
487				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
488				    imsg.hdr.peerid, 0, -1, v->lsa,
489				    ntohs(v->lsa->hdr.len));
490			} else {
491				/* LSA equal send direct ack */
492				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
493				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
494				    sizeof(lsa->hdr));
495				free(lsa);
496			}
497			break;
498		case IMSG_LS_MAXAGE:
499			nbr = rde_nbr_find(imsg.hdr.peerid);
500			if (nbr == NULL)
501				break;
502
503			if (imsg.hdr.len != IMSG_HEADER_SIZE +
504			    sizeof(struct lsa_hdr))
505				fatalx("invalid size of OE request");
506			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
507
508			if (rde_nbr_loading(nbr->area))
509				break;
510
511			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
512				    lsa_hdr.adv_rtr);
513			if (v == NULL)
514				db_hdr = NULL;
515			else
516				db_hdr = &v->lsa->hdr;
517
518			/*
519			 * only delete LSA if the one in the db is not newer
520			 */
521			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
522				lsa_del(nbr, &lsa_hdr);
523			break;
524		case IMSG_CTL_SHOW_DATABASE:
525		case IMSG_CTL_SHOW_DB_EXT:
526		case IMSG_CTL_SHOW_DB_LINK:
527		case IMSG_CTL_SHOW_DB_NET:
528		case IMSG_CTL_SHOW_DB_RTR:
529		case IMSG_CTL_SHOW_DB_INTRA:
530		case IMSG_CTL_SHOW_DB_SELF:
531		case IMSG_CTL_SHOW_DB_SUM:
532		case IMSG_CTL_SHOW_DB_ASBR:
533			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
534			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
535				log_warnx("rde_dispatch_imsg: wrong imsg len");
536				break;
537			}
538			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
539				LIST_FOREACH(area, &rdeconf->area_list, entry) {
540					rde_dump_area(area, imsg.hdr.type,
541					    imsg.hdr.pid);
542				}
543				lsa_dump(&asext_tree, imsg.hdr.type,
544				    imsg.hdr.pid);
545			} else {
546				memcpy(&aid, imsg.data, sizeof(aid));
547				if ((area = area_find(rdeconf, aid)) != NULL) {
548					rde_dump_area(area, imsg.hdr.type,
549					    imsg.hdr.pid);
550					if (!area->stub)
551						lsa_dump(&asext_tree,
552						    imsg.hdr.type,
553						    imsg.hdr.pid);
554				}
555			}
556			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
557			    imsg.hdr.pid, -1, NULL, 0);
558			break;
559		case IMSG_CTL_SHOW_RIB:
560			LIST_FOREACH(area, &rdeconf->area_list, entry) {
561				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
562				    0, imsg.hdr.pid, -1, area, sizeof(*area));
563
564				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
565				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
566			}
567			aid.s_addr = 0;
568			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
569
570			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
571			    imsg.hdr.pid, -1, NULL, 0);
572			break;
573		case IMSG_CTL_SHOW_SUM:
574			rde_send_summary(imsg.hdr.pid);
575			LIST_FOREACH(area, &rdeconf->area_list, entry)
576				rde_send_summary_area(area, imsg.hdr.pid);
577			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
578			    imsg.hdr.pid, -1, NULL, 0);
579			break;
580		case IMSG_IFINFO:
581			if (imsg.hdr.len != IMSG_HEADER_SIZE +
582			    sizeof(struct iface))
583				fatalx("IFINFO imsg with wrong len");
584
585			ifp = imsg.data;
586
587			iface = if_find(ifp->ifindex);
588			if (iface == NULL)
589				fatalx("interface lost in rde");
590			iface->flags = ifp->flags;
591			iface->linkstate = ifp->linkstate;
592			iface->nh_reachable = ifp->nh_reachable;
593			if (iface->state != ifp->state) {
594				iface->state = ifp->state;
595				area = area_find(rdeconf, iface->area_id);
596				if (!area)
597					fatalx("interface lost area");
598				orig_intra_area_prefix_lsas(area);
599			}
600			break;
601		case IMSG_CTL_LOG_VERBOSE:
602			/* already checked by ospfe */
603			memcpy(&verbose, imsg.data, sizeof(verbose));
604			log_verbose(verbose);
605			break;
606		default:
607			log_debug("rde_dispatch_imsg: unexpected imsg %d",
608			    imsg.hdr.type);
609			break;
610		}
611		imsg_free(&imsg);
612	}
613	if (!shut)
614		imsg_event_add(iev);
615	else {
616		/* this pipe is dead, so remove the event handler */
617		event_del(&iev->ev);
618		event_loopexit(NULL);
619	}
620}
621
/* ARGSUSED */
/*
 * Event handler for the pipe to the parent (main) process: kernel
 * route redistribution, interface add/delete and config reload.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface, *iface;
	struct imsg		 imsg;
	struct kroute		 kr;
	struct rroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	struct lsa		*lsa;
	struct vertex		*v;
	struct rt_node		*rn;
	ssize_t			 n;
	int			 shut = 0;
	unsigned int		 ifindex;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			/* a redistributed route appeared: originate as-ext LSA */
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			if ((lsa = rde_asext_get(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_NETWORK_DEL:
			/* redistributed route gone: age out the as-ext LSA */
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			if ((lsa = rde_asext_put(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				/*
				 * if v == NULL no LSA is in the table and
				 * nothing has to be done.
				 */
				if (v)
					lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_KROUTE_GET:
			/* parent asks us to re-announce a kernel route */
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
			    DT_NET)) != NULL)
				rde_send_change_kroute(rn);
			else
				/* should not happen */
				imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0,
				    0, -1, &kr, sizeof(kr));
			break;
		case IMSG_IFADD:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			RB_INIT(&niface->lsa_tree);

			/*
			 * NOTE(review): area_find() result is not checked;
			 * presumably the parent only sends IFADD for
			 * configured areas -- confirm against the sender.
			 */
			narea = area_find(rdeconf, niface->area_id);
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
			break;
		case IMSG_IFDELETE:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(ifindex))
				fatalx("IFDELETE imsg with wrong len");

			memcpy(&ifindex, imsg.data, sizeof(ifindex));
			iface = if_find(ifindex);
			if (iface == NULL)
				fatalx("interface lost in ospfe");

			LIST_REMOVE(iface, entry);
			if_del(iface);
			break;
		case IMSG_RECONF_CONF:
			/* start of a config reload: stage into nconf */
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_END:
			/* reload complete: swap staged config in */
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
773
774void
775rde_dump_area(struct area *area, int imsg_type, pid_t pid)
776{
777	struct iface	*iface;
778
779	/* dump header */
780	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
781	    area, sizeof(*area));
782
783	/* dump link local lsa */
784	LIST_FOREACH(iface, &area->iface_list, entry) {
785		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
786		    0, pid, -1, iface, sizeof(*iface));
787		lsa_dump(&iface->lsa_tree, imsg_type, pid);
788	}
789
790	/* dump area lsa */
791	lsa_dump(&area->lsa_tree, imsg_type, pid);
792}
793
794u_int32_t
795rde_router_id(void)
796{
797	return (rdeconf->rtr_id.s_addr);
798}
799
800void
801rde_send_change_kroute(struct rt_node *r)
802{
803	struct kroute		 kr;
804	struct rt_nexthop	*rn;
805
806	TAILQ_FOREACH(rn, &r->nexthop, entry) {
807		if (!rn->invalid)
808			break;
809	}
810	if (!rn)
811		fatalx("rde_send_change_kroute: no valid nexthop found");
812
813	bzero(&kr, sizeof(kr));
814	kr.prefix = r->prefix;
815	kr.nexthop = rn->nexthop;
816	if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
817	    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
818		kr.scope = rn->ifindex;
819	kr.prefixlen = r->prefixlen;
820	kr.ext_tag = r->ext_tag;
821
822	imsg_compose_event(iev_main, IMSG_KROUTE_CHANGE, 0, 0, -1,
823	    &kr, sizeof(kr));
824}
825
826void
827rde_send_delete_kroute(struct rt_node *r)
828{
829	struct kroute	 kr;
830
831	bzero(&kr, sizeof(kr));
832	kr.prefix = r->prefix;
833	kr.prefixlen = r->prefixlen;
834
835	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
836	    &kr, sizeof(kr));
837}
838
839void
840rde_send_summary(pid_t pid)
841{
842	static struct ctl_sum	 sumctl;
843	struct timeval		 now;
844	struct area		*area;
845	struct vertex		*v;
846
847	bzero(&sumctl, sizeof(struct ctl_sum));
848
849	sumctl.rtr_id.s_addr = rde_router_id();
850	sumctl.spf_delay = rdeconf->spf_delay;
851	sumctl.spf_hold_time = rdeconf->spf_hold_time;
852
853	LIST_FOREACH(area, &rdeconf->area_list, entry)
854		sumctl.num_area++;
855
856	RB_FOREACH(v, lsa_tree, &asext_tree)
857		sumctl.num_ext_lsa++;
858
859	gettimeofday(&now, NULL);
860	if (rdeconf->uptime < now.tv_sec)
861		sumctl.uptime = now.tv_sec - rdeconf->uptime;
862	else
863		sumctl.uptime = 0;
864
865	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
866	    sizeof(sumctl));
867}
868
869void
870rde_send_summary_area(struct area *area, pid_t pid)
871{
872	static struct ctl_sum_area	 sumareactl;
873	struct iface			*iface;
874	struct rde_nbr			*nbr;
875	struct lsa_tree			*tree = &area->lsa_tree;
876	struct vertex			*v;
877
878	bzero(&sumareactl, sizeof(struct ctl_sum_area));
879
880	sumareactl.area.s_addr = area->id.s_addr;
881	sumareactl.num_spf_calc = area->num_spf_calc;
882
883	LIST_FOREACH(iface, &area->iface_list, entry)
884		sumareactl.num_iface++;
885
886	LIST_FOREACH(nbr, &area->nbr_list, entry)
887		if (nbr->state == NBR_STA_FULL && !nbr->self)
888			sumareactl.num_adj_nbr++;
889
890	RB_FOREACH(v, lsa_tree, tree)
891		sumareactl.num_lsa++;
892
893	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
894	    sizeof(sumareactl));
895}
896
LIST_HEAD(rde_nbr_head, rde_nbr);

/* hash table mapping peerid to rde_nbr, sized in rde_nbr_init() */
struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;	/* table size - 1; size is a power of two */
} rdenbrtable;

/* bucket for a given peerid */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
906
907void
908rde_nbr_init(u_int32_t hashsize)
909{
910	struct rde_nbr_head	*head;
911	u_int32_t		 hs, i;
912
913	for (hs = 1; hs < hashsize; hs <<= 1)
914		;
915	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
916	if (rdenbrtable.hashtbl == NULL)
917		fatal("rde_nbr_init");
918
919	for (i = 0; i < hs; i++)
920		LIST_INIT(&rdenbrtable.hashtbl[i]);
921
922	rdenbrtable.hashmask = hs - 1;
923
924	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
925		fatal("rde_nbr_init");
926
927	nbrself->id.s_addr = rde_router_id();
928	nbrself->peerid = NBR_IDSELF;
929	nbrself->state = NBR_STA_DOWN;
930	nbrself->self = 1;
931	head = RDE_NBR_HASH(NBR_IDSELF);
932	LIST_INSERT_HEAD(head, nbrself, hash);
933}
934
935void
936rde_nbr_free(void)
937{
938	free(nbrself);
939	free(rdenbrtable.hashtbl);
940}
941
942struct rde_nbr *
943rde_nbr_find(u_int32_t peerid)
944{
945	struct rde_nbr_head	*head;
946	struct rde_nbr		*nbr;
947
948	head = RDE_NBR_HASH(peerid);
949
950	LIST_FOREACH(nbr, head, hash) {
951		if (nbr->peerid == peerid)
952			return (nbr);
953	}
954
955	return (NULL);
956}
957
/*
 * Register a new neighbor announced by ospfe.  The template `new`
 * carries area_id/ifindex (and the other fields copied verbatim);
 * peerid, area and iface are filled in here.  Returns NULL if the
 * peerid is already known; unknown area or interface is fatal.
 */
struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	/* resolve the neighbor's interface within that area */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	/* copy the template, then patch in the resolved pointers */
	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}
994
995void
996rde_nbr_del(struct rde_nbr *nbr)
997{
998	if (nbr == NULL)
999		return;
1000
1001	rde_req_list_free(nbr);
1002
1003	LIST_REMOVE(nbr, entry);
1004	LIST_REMOVE(nbr, hash);
1005
1006	free(nbr);
1007}
1008
1009int
1010rde_nbr_loading(struct area *area)
1011{
1012	struct rde_nbr		*nbr;
1013	int			 checkall = 0;
1014
1015	if (area == NULL) {
1016		area = LIST_FIRST(&rdeconf->area_list);
1017		checkall = 1;
1018	}
1019
1020	while (area != NULL) {
1021		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1022			if (nbr->self)
1023				continue;
1024			if (nbr->state & NBR_STA_XCHNG ||
1025			    nbr->state & NBR_STA_LOAD)
1026				return (1);
1027		}
1028		if (!checkall)
1029			break;
1030		area = LIST_NEXT(area, entry);
1031	}
1032
1033	return (0);
1034}
1035
1036struct rde_nbr *
1037rde_nbr_self(struct area *area)
1038{
1039	struct rde_nbr		*nbr;
1040
1041	LIST_FOREACH(nbr, &area->nbr_list, entry)
1042		if (nbr->self)
1043			return (nbr);
1044
1045	/* this may not happen */
1046	fatalx("rde_nbr_self: area without self");
1047	return (NULL);
1048}
1049
1050/*
1051 * LSA req list
1052 */
1053void
1054rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1055{
1056	struct rde_req_entry	*le;
1057
1058	if ((le = calloc(1, sizeof(*le))) == NULL)
1059		fatal("rde_req_list_add");
1060
1061	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1062	le->type = lsa->type;
1063	le->ls_id = lsa->ls_id;
1064	le->adv_rtr = lsa->adv_rtr;
1065}
1066
1067int
1068rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1069{
1070	struct rde_req_entry	*le;
1071
1072	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1073		if ((lsa_hdr->type == le->type) &&
1074		    (lsa_hdr->ls_id == le->ls_id) &&
1075		    (lsa_hdr->adv_rtr == le->adv_rtr))
1076			return (1);
1077	}
1078	return (0);
1079}
1080
1081void
1082rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1083{
1084	struct rde_req_entry	*le;
1085
1086	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1087		if ((lsa_hdr->type == le->type) &&
1088		    (lsa_hdr->ls_id == le->ls_id) &&
1089		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1090			TAILQ_REMOVE(&nbr->req_list, le, entry);
1091			free(le);
1092			return;
1093		}
1094	}
1095}
1096
1097void
1098rde_req_list_free(struct rde_nbr *nbr)
1099{
1100	struct rde_req_entry	*le;
1101
1102	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1103		TAILQ_REMOVE(&nbr->req_list, le, entry);
1104		free(le);
1105	}
1106}
1107
1108/*
1109 * as-external LSA handling
1110 */
/*
 * Build an AS-external LSA for a redistributed route, unless the
 * prefix is already covered by one of our interface addresses (and
 * therefore announced as an intra-area prefix LSA); in that case
 * return NULL and announce nothing.
 */
struct lsa *
rde_asext_get(struct rroute *rr)
{
	struct area		*area;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct in6_addr		 addr;

	/* scan every global address on every interface for a collision */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
					continue;

				inet6applymask(&addr, &ia->addr,
				    rr->kr.prefixlen);
				if (!memcmp(&addr, &rr->kr.prefix,
				    sizeof(addr)) && rr->kr.prefixlen ==
				    ia->prefixlen) {
					/* already announced as Prefix LSA */
					log_debug("rde_asext_get: %s/%d is "
					    "part of prefix LSA",
					    log_in6addr(&rr->kr.prefix),
					    rr->kr.prefixlen);
					return (NULL);
				}
			}

	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}
1142
1143struct lsa *
1144rde_asext_put(struct rroute *rr)
1145{
1146	/*
1147	 * just try to remove the LSA. If the prefix is announced as
1148	 * stub net LSA lsa_find() will fail later and nothing will happen.
1149	 */
1150
1151	/* remove by reflooding with MAX_AGE */
1152	return (orig_asext_lsa(rr, MAX_AGE));
1153}
1154
1155/*
1156 * summary LSA stuff
1157 */
/*
 * Decide whether an inter-area summary LSA should be originated for
 * this route into the given area.  Origination itself is still
 * disabled (#if 0) -- only the filter checks are live.
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct vertex		*v = NULL;
//XXX	struct lsa		*lsa;
	u_int16_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* TODO nexthop check, nexthop part of area -> no summary */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_INTER_A_PREFIX;
	} else if (rte->d_type == DT_RTR) {
		type = LSA_TYPE_INTER_A_ROUTER;
	} else
	/*
	 * NOTE(review): with the #if 0 block compiled out, the dangling
	 * "else" above binds to the "if (v)" statement below, and since
	 * v stays NULL in this function the tail is effectively dead
	 * code until the disabled origination is finished -- confirm
	 * before relying on the cost update.
	 */

#if 0 /* XXX a lot todo */
	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
#endif

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}
1204
1205/*
1206 * Functions for self-originated LSAs
1207 */
1208
1209/* Prefix LSAs have variable size. We have to be careful to copy the right
1210 * amount of bytes, and to realloc() the right amount of memory. */
1211void
1212append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1213{
1214	struct lsa_prefix	*copy;
1215	unsigned int		 lsa_prefix_len;
1216	unsigned int		 new_len;
1217	char  			*new_lsa;
1218
1219	lsa_prefix_len = sizeof(struct lsa_prefix)
1220	    + LSA_PREFIXSIZE(prefix->prefixlen);
1221
1222	new_len = *len + lsa_prefix_len;
1223
1224	/* Make sure we have enough space for this prefix. */
1225	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1226		fatalx("append_prefix_lsa");
1227
1228	/* Append prefix to LSA. */
1229	copy = (struct lsa_prefix *)(new_lsa + *len);
1230	memcpy(copy, prefix, lsa_prefix_len);
1231	copy->metric = 0;
1232
1233	*lsa = (struct lsa *)new_lsa;
1234	*len = new_len;
1235}
1236
1237int
1238prefix_compare(struct prefix_node *a, struct prefix_node *b)
1239{
1240	struct lsa_prefix	*p;
1241	struct lsa_prefix	*q;
1242	int		 	 i;
1243	int			 len;
1244
1245	p = a->prefix;
1246	q = b->prefix;
1247
1248	len = MIN(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1249
1250	i = memcmp(p + 1, q + 1, len);
1251	if (i)
1252		return (i);
1253	if (p->prefixlen < q->prefixlen)
1254		return (-1);
1255	if (p->prefixlen > q->prefixlen)
1256		return (1);
1257	return (0);
1258}
1259
1260void
1261prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1262{
1263	struct prefix_node	*old;
1264	struct prefix_node	*new;
1265	struct in6_addr		 addr;
1266	unsigned int		 len;
1267	unsigned int		 i;
1268	char			*cur_prefix;
1269
1270	cur_prefix = (char *)(lsa + 1);
1271
1272	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1273		if ((new = calloc(1, sizeof(*new))) == NULL)
1274			fatal("prefix_tree_add");
1275		new->prefix = (struct lsa_prefix *)cur_prefix;
1276
1277		len = sizeof(*new->prefix)
1278		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1279
1280		bzero(&addr, sizeof(addr));
1281		memcpy(&addr, new->prefix + 1,
1282		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1283
1284		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1285		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1286		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1287			old = RB_INSERT(prefix_tree, tree, new);
1288			if (old != NULL) {
1289				old->prefix->options |= new->prefix->options;
1290				free(new);
1291			}
1292		}
1293
1294		cur_prefix = cur_prefix + len;
1295	}
1296}
1297
/* Generate the red-black tree functions for struct prefix_node trees,
 * keyed by prefix_compare(). */
RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1299
1300/* Return non-zero if Link LSA was originated from an adjacent neighbor. */
1301int
1302link_lsa_from_full_nbr(struct lsa *lsa, struct iface *iface)
1303{
1304	struct rde_nbr	*nbr;
1305	struct area	*area;
1306
1307	if ((area = area_find(rdeconf, iface->area_id)) == NULL)
1308		fatalx("interface lost area");
1309
1310	LIST_FOREACH(nbr, &area->nbr_list, entry) {
1311		if (nbr->self || nbr->iface->ifindex != iface->ifindex)
1312			continue;
1313		if (lsa->hdr.adv_rtr == nbr->id.s_addr)
1314			break;
1315	}
1316	if (!nbr)
1317		return 0;
1318
1319	if (nbr->state & NBR_STA_FULL &&
1320	    ntohl(lsa->hdr.ls_id) == nbr->iface_id)
1321		return 1;
1322
1323	return 0;
1324}
1325
/* Build the network-referenced intra-area-prefix LSA for a broadcast/NBMA
 * interface, collecting prefixes from the Link LSAs of fully adjacent
 * neighbors (plus our own Link LSA).  Returns NULL when nothing needs to
 * be originated and no old copy ('old') exists that must be flushed;
 * otherwise a malloc'd LSA the caller merges into the DB.  An LSA with
 * zero prefixes is originated with MAX_AGE to flush the old copy. */
struct lsa *
orig_intra_lsa_net(struct iface *iface, struct vertex *old)
{
	struct lsa		*lsa;
	struct vertex		*v;
	struct area		*area;
	struct prefix_node	*node;
	struct prefix_tree	 tree;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	if ((area = area_find(rdeconf, iface->area_id)) == NULL)
		fatalx("interface lost area");

	log_debug("orig_intra_lsa_net: area %s, interface %s",
	    inet_ntoa(area->id), iface->name);

	RB_INIT(&tree);

	/* only the DR originates this LSA for the link */
	if (iface->state & IF_STA_DR) {
		/* collect prefixes from Link LSAs of adjacent neighbors */
		RB_FOREACH(v, lsa_tree, &iface->lsa_tree) {
			if (v->type != LSA_TYPE_LINK)
				continue;
			if (link_lsa_from_full_nbr(v->lsa, iface))
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
		if (RB_EMPTY(&tree)) {
			/* There are no adjacent neighbors on link.
			 * If a copy of this LSA already exists in DB,
			 * it needs to be flushed. orig_intra_lsa_rtr()
			 * will take care of prefixes configured on
			 * this interface. */
			if (!old)
				return NULL;
		} else {
			/* Add our own prefixes configured for this link. */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(iface->ifindex), rde_router_id());
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
	/* Continue only if a copy of this LSA already exists in DB.
	 * It needs to be flushed. */
	} else if (!old)
		return NULL;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_net");

	/* referenced LSA: the network LSA we originate for this link */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	/* append collected prefixes; each append grows lsa and len */
	numprefix = 0;
	RB_FOREACH(node, prefix_tree, &tree) {
		append_prefix_lsa(&lsa, &len, node->prefix);
		numprefix++;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* tree nodes point into the Link LSAs; only the nodes are freed */
	while (!RB_EMPTY(&tree))
		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(iface->ifindex);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}
1403
/* Build the router-referenced intra-area-prefix LSA carrying the prefixes
 * configured on this router's interfaces in 'area' (those not covered by
 * orig_intra_lsa_net()).  Returns NULL when there are no prefixes and no
 * old copy ('old') to flush; a zero-prefix LSA is originated with MAX_AGE
 * to flush the old copy. */
struct lsa *
orig_intra_lsa_rtr(struct area *area, struct vertex *old)
{
	/* scratch buffer big enough for one entry with a full /128 prefix */
	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
				    + sizeof(struct in6_addr)];
	struct lsa		*lsa;
	struct lsa_prefix	*lsa_prefix;
	struct in6_addr		*prefix;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct rde_nbr		*nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_rtr");

	/* referenced LSA: our own router LSA (ref_ls_id is always 0) */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
	lsa->data.pref_intra.ref_ls_id = 0;
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	log_debug("orig_intra_lsa_rtr: area %s", inet_ntoa(area->id));

	numprefix = 0;
	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->state & IF_STA_DOWN)
			continue;

		/* Broadcast links with adjacencies are handled
		 * by orig_intra_lsa_net(), ignore. */
		if (iface->type == IF_TYPE_BROADCAST ||
		    iface->type == IF_TYPE_NBMA) {
			if (iface->state & IF_STA_WAITING)
				/* Skip, we're still waiting for
				 * adjacencies to form. */
				continue;

			/* any full neighbor on this iface means the net
			 * LSA covers it -> skip this interface */
			LIST_FOREACH(nbr, &area->nbr_list, entry)
				if (!nbr->self &&
				    nbr->iface->ifindex == iface->ifindex &&
				    nbr->state & NBR_STA_FULL)
					break;
			if (nbr)
				continue;
		}

		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;

		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
			/* link-local addresses are never advertised */
			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
				continue;

			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));

			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
			    iface->state & IF_STA_LOOPBACK) {
				/* advertise the address as a host route */
				lsa_prefix->prefixlen = 128;
			} else {
				lsa_prefix->prefixlen = ia->prefixlen;
				/* NOTE(review): append_prefix_lsa() zeroes
				 * the copied metric field, so this value
				 * appears to be lost -- confirm */
				lsa_prefix->metric = htons(iface->metric);
			}

			if (lsa_prefix->prefixlen == 128)
				lsa_prefix->options |= OSPF_PREFIX_LA;

			/* encoded prefix bytes follow the entry header */
			prefix = (struct in6_addr *)(lsa_prefix + 1);
			inet6applymask(prefix, &ia->addr,
			    lsa_prefix->prefixlen);
			append_prefix_lsa(&lsa, &len, lsa_prefix);
			numprefix++;
		}

		/* TODO: Add prefixes of directly attached hosts, too */
		/* TODO: Add prefixes for virtual links */
	}

	/* If no prefixes were included, continue only if a copy of this
	 * LSA already exists in DB. It needs to be flushed. */
	if (numprefix == 0 && !old) {
		free(lsa);
		return NULL;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}
1502
1503void
1504orig_intra_area_prefix_lsas(struct area *area)
1505{
1506	struct lsa	*lsa;
1507	struct vertex	*old;
1508	struct iface	*iface;
1509
1510	LIST_FOREACH(iface, &area->iface_list, entry) {
1511		if (iface->type == IF_TYPE_BROADCAST ||
1512		    iface->type == IF_TYPE_NBMA) {
1513			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1514			    htonl(iface->ifindex), rde_router_id());
1515			lsa = orig_intra_lsa_net(iface, old);
1516			if (lsa)
1517				lsa_merge(rde_nbr_self(area), lsa, old);
1518		}
1519	}
1520
1521	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1522		htonl(LS_ID_INTRA_RTR), rde_router_id());
1523	lsa = orig_intra_lsa_rtr(area, old);
1524	if (lsa)
1525		lsa_merge(rde_nbr_self(area), lsa, old);
1526}
1527
1528int
1529comp_asext(struct lsa *a, struct lsa *b)
1530{
1531	/* compare prefixes, if they are equal or not */
1532	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1533		return (-1);
1534	return (memcmp(
1535	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1536	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1537	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
1538}
1539
1540struct lsa *
1541orig_asext_lsa(struct rroute *rr, u_int16_t age)
1542{
1543	struct lsa	*lsa;
1544	u_int32_t	 ext_tag;
1545	u_int16_t	 len, ext_off = 0;
1546
1547	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
1548	    LSA_PREFIXSIZE(rr->kr.prefixlen);
1549
1550	/*
1551	 * nexthop -- on connected routes we are the nexthop,
1552	 * on all other cases we should announce the true nexthop
1553	 * unless that nexthop is outside of the ospf cloud.
1554	 * XXX for now we don't do this.
1555	 */
1556
1557	if (rr->kr.ext_tag) {
1558		ext_off = len;
1559		len += sizeof(ext_tag);
1560	}
1561	if ((lsa = calloc(1, len)) == NULL)
1562		fatal("orig_asext_lsa");
1563
1564	log_debug("orig_asext_lsa: %s/%d age %d",
1565	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);
1566
1567	/* LSA header */
1568	lsa->hdr.age = htons(age);
1569	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
1570	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1571	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1572	lsa->hdr.len = htons(len);
1573
1574	lsa->data.asext.metric = htonl(rr->metric);
1575	lsa->data.asext.prefix.prefixlen = rr->kr.prefixlen;
1576	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1577	    &rr->kr.prefix, LSA_PREFIXSIZE(rr->kr.prefixlen));
1578
1579	if (rr->kr.ext_tag) {
1580		lsa->data.asext.prefix.options |= LSA_ASEXT_T_FLAG;
1581		ext_tag = htonl(rr->kr.ext_tag);
1582		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
1583	}
1584
1585	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, lsa->hdr.type,
1586	    lsa->hdr.adv_rtr, comp_asext, lsa);
1587	lsa->hdr.ls_chksum = 0;
1588	lsa->hdr.ls_chksum =
1589	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1590
1591	return (lsa);
1592}
1593
/* Originate a summary LSA for inter-area route 'rte'.
 * XXX the implementation is entirely disabled (#if 0); the function
 * currently always returns NULL. */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	/* summary origination not implemented yet */
	return NULL;
}
1634