/* rde.c revision 1.58 */
1/*	$OpenBSD: rde.c,v 1.58 2011/07/07 17:10:48 claudio Exp $ */
2
3/*
4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#include <sys/types.h>
22#include <sys/socket.h>
23#include <sys/queue.h>
24#include <net/if_types.h>
25#include <netinet/in.h>
26#include <arpa/inet.h>
27#include <err.h>
28#include <errno.h>
29#include <stdlib.h>
30#include <signal.h>
31#include <string.h>
32#include <pwd.h>
33#include <unistd.h>
34#include <event.h>
35
36#include "ospf6.h"
37#include "ospf6d.h"
38#include "ospfe.h"
39#include "log.h"
40#include "rde.h"
41
42void		 rde_sig_handler(int sig, short, void *);
43void		 rde_shutdown(void);
44void		 rde_dispatch_imsg(int, short, void *);
45void		 rde_dispatch_parent(int, short, void *);
46void		 rde_dump_area(struct area *, int, pid_t);
47
48void		 rde_send_summary(pid_t);
49void		 rde_send_summary_area(struct area *, pid_t);
50void		 rde_nbr_init(u_int32_t);
51void		 rde_nbr_free(void);
52struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
53void		 rde_nbr_del(struct rde_nbr *);
54
55void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
56int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
57void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
58void		 rde_req_list_free(struct rde_nbr *);
59
60struct lsa	*rde_asext_get(struct rroute *);
61struct lsa	*rde_asext_put(struct rroute *);
62
63int		 comp_asext(struct lsa *, struct lsa *);
64struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
65struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
66struct lsa	*orig_intra_lsa_net(struct area *, struct iface *,
67		 struct vertex *);
68struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
69void		 append_prefix_lsa(struct lsa **, u_int16_t *,
70		    struct lsa_prefix *);
71
72/* A 32-bit value != any ifindex.
73 * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
74#define	LS_ID_INTRA_RTR	0x01000000
75
76/* Tree of prefixes with global scope on given a link,
77 * see orig_intra_lsa_*() */
78struct prefix_node {
79	RB_ENTRY(prefix_node)	 entry;
80	struct lsa_prefix	*prefix;
81};
82RB_HEAD(prefix_tree, prefix_node);
83RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
84int		 prefix_compare(struct prefix_node *, struct prefix_node *);
85void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);
86
87struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
88struct imsgev		*iev_ospfe;
89struct imsgev		*iev_main;
90struct rde_nbr		*nbrself;
91struct lsa_tree		 asext_tree;
92
/* ARGSUSED */
/*
 * Signal handler for SIGINT/SIGTERM.  Invoked from the libevent loop,
 * not in real signal context, so normal (non async-signal-safe) calls
 * are fine here.  Any other signal reaching this handler is a bug.
 */
void
rde_sig_handler(int sig, short event, void *arg)
{
	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}
	fatalx("unexpected signal");
}
110
/* route decision engine */

/*
 * Fork off the route decision engine process.
 *
 * The parent returns the child's pid.  The child never returns: it
 * chroots to the ospf6d user's home directory, drops privileges, wires
 * up the imsg channels to the parent and the ospfe process and runs
 * the libevent loop until shutdown.
 *
 * xconf             - configuration handed down from the parent
 * pipe_parent2rde   - socketpair between parent and rde
 * pipe_ospfe2rde    - socketpair between ospfe and rde
 * pipe_parent2ospfe - parent<->ospfe pair; unused here, both ends closed
 */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		/* parent: hand back the child's pid */
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	/* jail the process in the unprivileged user's home directory */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	/* drop group before user privileges; order matters */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends this process does not use */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* the redistribute list is only needed in the parent process */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
206
/*
 * Tear down all RDE state and terminate the process.  Called from the
 * signal handler and when the event loop exits; never returns.
 */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	/* free every area together with its interfaces and LSA database */
	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	/* discard unsent imsgs and release the channel structures */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	free(iev_ospfe);
	msgbuf_clear(&iev_main->ibuf.w);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
231
232int
233rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
234    u_int16_t datalen)
235{
236	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
237	    data, datalen));
238}
239
240/* ARGSUSED */
241void
242rde_dispatch_imsg(int fd, short event, void *bula)
243{
244	struct imsgev		*iev = bula;
245	struct imsgbuf		*ibuf = &iev->ibuf;
246	struct imsg		 imsg;
247	struct in_addr		 aid;
248	struct ls_req_hdr	 req_hdr;
249	struct lsa_hdr		 lsa_hdr, *db_hdr;
250	struct rde_nbr		 rn, *nbr;
251	struct timespec		 tp;
252	struct lsa		*lsa;
253	struct area		*area;
254	struct vertex		*v;
255	char			*buf;
256	ssize_t			 n;
257	time_t			 now;
258	int			 r, state, self, shut = 0, verbose;
259	u_int16_t		 l;
260
261	if (event & EV_READ) {
262		if ((n = imsg_read(ibuf)) == -1)
263			fatal("imsg_read error");
264		if (n == 0)	/* connection closed */
265			shut = 1;
266	}
267	if (event & EV_WRITE) {
268		if (msgbuf_write(&ibuf->w) == -1)
269			fatal("msgbuf_write");
270	}
271
272	clock_gettime(CLOCK_MONOTONIC, &tp);
273	now = tp.tv_sec;
274
275	for (;;) {
276		if ((n = imsg_get(ibuf, &imsg)) == -1)
277			fatal("rde_dispatch_imsg: imsg_read error");
278		if (n == 0)
279			break;
280
281		switch (imsg.hdr.type) {
282		case IMSG_NEIGHBOR_UP:
283			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
284				fatalx("invalid size of OE request");
285			memcpy(&rn, imsg.data, sizeof(rn));
286
287			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
288				fatalx("rde_dispatch_imsg: "
289				    "neighbor already exists");
290			break;
291		case IMSG_NEIGHBOR_DOWN:
292			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
293			break;
294		case IMSG_NEIGHBOR_CHANGE:
295			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
296				fatalx("invalid size of OE request");
297			memcpy(&state, imsg.data, sizeof(state));
298
299			nbr = rde_nbr_find(imsg.hdr.peerid);
300			if (nbr == NULL)
301				break;
302
303			if (state != nbr->state &&
304			    (nbr->state & NBR_STA_FULL ||
305			    state & NBR_STA_FULL)) {
306				nbr->state = state;
307				area_track(nbr->area, state);
308				orig_intra_area_prefix_lsas(nbr->area);
309			}
310
311			nbr->state = state;
312			if (nbr->state & NBR_STA_FULL)
313				rde_req_list_free(nbr);
314			break;
315		case IMSG_DB_SNAPSHOT:
316			nbr = rde_nbr_find(imsg.hdr.peerid);
317			if (nbr == NULL)
318				break;
319
320			lsa_snap(nbr, imsg.hdr.peerid);
321
322			imsg_compose_event(iev_ospfe, IMSG_DB_END, imsg.hdr.peerid,
323			    0, -1, NULL, 0);
324			break;
325		case IMSG_DD:
326			nbr = rde_nbr_find(imsg.hdr.peerid);
327			if (nbr == NULL)
328				break;
329
330			buf = imsg.data;
331			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
332			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
333				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
334				buf += sizeof(lsa_hdr);
335
336				v = lsa_find(nbr->iface, lsa_hdr.type,
337				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
338				if (v == NULL)
339					db_hdr = NULL;
340				else
341					db_hdr = &v->lsa->hdr;
342
343				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
344					/*
345					 * only request LSAs that are
346					 * newer or missing
347					 */
348					rde_req_list_add(nbr, &lsa_hdr);
349					imsg_compose_event(iev_ospfe, IMSG_DD,
350					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
351					    sizeof(lsa_hdr));
352				}
353			}
354			if (l != 0)
355				log_warnx("rde_dispatch_imsg: peerid %lu, "
356				    "trailing garbage in Database Description "
357				    "packet", imsg.hdr.peerid);
358
359			imsg_compose_event(iev_ospfe, IMSG_DD_END,
360			    imsg.hdr.peerid, 0, -1, NULL, 0);
361			break;
362		case IMSG_LS_REQ:
363			nbr = rde_nbr_find(imsg.hdr.peerid);
364			if (nbr == NULL)
365				break;
366
367			buf = imsg.data;
368			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
369			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
370				memcpy(&req_hdr, buf, sizeof(req_hdr));
371				buf += sizeof(req_hdr);
372
373				if ((v = lsa_find(nbr->iface,
374				    req_hdr.type, req_hdr.ls_id,
375				    req_hdr.adv_rtr)) == NULL) {
376					imsg_compose_event(iev_ospfe,
377					    IMSG_LS_BADREQ, imsg.hdr.peerid,
378					    0, -1, NULL, 0);
379					continue;
380				}
381				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
382				    imsg.hdr.peerid, 0, -1, v->lsa,
383				    ntohs(v->lsa->hdr.len));
384			}
385			if (l != 0)
386				log_warnx("rde_dispatch_imsg: peerid %lu, "
387				    "trailing garbage in LS Request "
388				    "packet", imsg.hdr.peerid);
389			break;
390		case IMSG_LS_UPD:
391			nbr = rde_nbr_find(imsg.hdr.peerid);
392			if (nbr == NULL)
393				break;
394
395			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
396			if (lsa == NULL)
397				fatal(NULL);
398			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
399
400			if (!lsa_check(nbr, lsa,
401			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
402				free(lsa);
403				break;
404			}
405
406			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
407			    lsa->hdr.adv_rtr);
408			if (v == NULL)
409				db_hdr = NULL;
410			else
411				db_hdr = &v->lsa->hdr;
412
413			if (nbr->self) {
414				lsa_merge(nbr, lsa, v);
415				/* lsa_merge frees the right lsa */
416				break;
417			}
418
419			r = lsa_newer(&lsa->hdr, db_hdr);
420			if (r > 0) {
421				/* new LSA newer than DB */
422				if (v && v->flooded &&
423				    v->changed + MIN_LS_ARRIVAL >= now) {
424					free(lsa);
425					break;
426				}
427
428				rde_req_list_del(nbr, &lsa->hdr);
429
430				self = lsa_self(lsa);
431				if (self) {
432					if (v == NULL)
433						/* LSA is no longer announced,
434						 * remove by premature aging. */
435						lsa_flush(nbr, lsa);
436					else
437						lsa_reflood(v, lsa);
438				} else if (lsa_add(nbr, lsa))
439					/* delayed lsa, don't flood yet */
440					break;
441
442				/* flood and perhaps ack LSA */
443				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
444				    imsg.hdr.peerid, 0, -1, lsa,
445				    ntohs(lsa->hdr.len));
446
447				/* reflood self originated LSA */
448				if (self && v)
449					imsg_compose_event(iev_ospfe,
450					    IMSG_LS_FLOOD, v->peerid, 0, -1,
451					    v->lsa, ntohs(v->lsa->hdr.len));
452				/* new LSA was not added so free it */
453				if (self)
454					free(lsa);
455			} else if (r < 0) {
456				/*
457				 * point 6 of "The Flooding Procedure"
458				 * We are violating the RFC here because
459				 * it does not make sense to reset a session
460				 * because an equal LSA is already in the table.
461				 * Only if the LSA sent is older than the one
462				 * in the table we should reset the session.
463				 */
464				if (rde_req_list_exists(nbr, &lsa->hdr)) {
465					imsg_compose_event(iev_ospfe,
466					    IMSG_LS_BADREQ, imsg.hdr.peerid,
467					    0, -1, NULL, 0);
468					free(lsa);
469					break;
470				}
471
472				/* lsa no longer needed */
473				free(lsa);
474
475				/* new LSA older than DB */
476				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
477				    ntohs(db_hdr->age) == MAX_AGE)
478					/* seq-num wrap */
479					break;
480
481				if (v->changed + MIN_LS_ARRIVAL >= now)
482					break;
483
484				/* directly send current LSA, no ack */
485				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
486				    imsg.hdr.peerid, 0, -1, v->lsa,
487				    ntohs(v->lsa->hdr.len));
488			} else {
489				/* LSA equal send direct ack */
490				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
491				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
492				    sizeof(lsa->hdr));
493				free(lsa);
494			}
495			break;
496		case IMSG_LS_MAXAGE:
497			nbr = rde_nbr_find(imsg.hdr.peerid);
498			if (nbr == NULL)
499				break;
500
501			if (imsg.hdr.len != IMSG_HEADER_SIZE +
502			    sizeof(struct lsa_hdr))
503				fatalx("invalid size of OE request");
504			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
505
506			if (rde_nbr_loading(nbr->area))
507				break;
508
509			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
510			    lsa_hdr.adv_rtr);
511			if (v == NULL)
512				db_hdr = NULL;
513			else
514				db_hdr = &v->lsa->hdr;
515
516			/*
517			 * only delete LSA if the one in the db is not newer
518			 */
519			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
520				lsa_del(nbr, &lsa_hdr);
521			break;
522		case IMSG_CTL_SHOW_DATABASE:
523		case IMSG_CTL_SHOW_DB_EXT:
524		case IMSG_CTL_SHOW_DB_LINK:
525		case IMSG_CTL_SHOW_DB_NET:
526		case IMSG_CTL_SHOW_DB_RTR:
527		case IMSG_CTL_SHOW_DB_INTRA:
528		case IMSG_CTL_SHOW_DB_SELF:
529		case IMSG_CTL_SHOW_DB_SUM:
530		case IMSG_CTL_SHOW_DB_ASBR:
531			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
532			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
533				log_warnx("rde_dispatch_imsg: wrong imsg len");
534				break;
535			}
536			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
537				LIST_FOREACH(area, &rdeconf->area_list, entry) {
538					rde_dump_area(area, imsg.hdr.type,
539					    imsg.hdr.pid);
540				}
541				lsa_dump(&asext_tree, imsg.hdr.type,
542				    imsg.hdr.pid);
543			} else {
544				memcpy(&aid, imsg.data, sizeof(aid));
545				if ((area = area_find(rdeconf, aid)) != NULL) {
546					rde_dump_area(area, imsg.hdr.type,
547					    imsg.hdr.pid);
548					if (!area->stub)
549						lsa_dump(&asext_tree,
550						    imsg.hdr.type,
551						    imsg.hdr.pid);
552				}
553			}
554			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
555			    imsg.hdr.pid, -1, NULL, 0);
556			break;
557		case IMSG_CTL_SHOW_RIB:
558			LIST_FOREACH(area, &rdeconf->area_list, entry) {
559				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
560				    0, imsg.hdr.pid, -1, area, sizeof(*area));
561
562				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
563				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
564			}
565			aid.s_addr = 0;
566			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
567
568			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
569			    imsg.hdr.pid, -1, NULL, 0);
570			break;
571		case IMSG_CTL_SHOW_SUM:
572			rde_send_summary(imsg.hdr.pid);
573			LIST_FOREACH(area, &rdeconf->area_list, entry)
574				rde_send_summary_area(area, imsg.hdr.pid);
575			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
576			    imsg.hdr.pid, -1, NULL, 0);
577			break;
578		case IMSG_IFINFO:
579			if (imsg.hdr.len != IMSG_HEADER_SIZE +
580			    sizeof(int))
581				fatalx("IFINFO imsg with wrong len");
582
583			nbr = rde_nbr_find(imsg.hdr.peerid);
584			if (nbr == NULL)
585				fatalx("IFINFO imsg with bad peerid");
586			memcpy(&nbr->iface->state, imsg.data, sizeof(int));
587
588			/* Resend LSAs if interface state changes. */
589			orig_intra_area_prefix_lsas(nbr->area);
590			break;
591		case IMSG_CTL_LOG_VERBOSE:
592			/* already checked by ospfe */
593			memcpy(&verbose, imsg.data, sizeof(verbose));
594			log_verbose(verbose);
595			break;
596		default:
597			log_debug("rde_dispatch_imsg: unexpected imsg %d",
598			    imsg.hdr.type);
599			break;
600		}
601		imsg_free(&imsg);
602	}
603	if (!shut)
604		imsg_event_add(iev);
605	else {
606		/* this pipe is dead, so remove the event handler */
607		event_del(&iev->ev);
608		event_loopexit(NULL);
609	}
610}
611
612/* ARGSUSED */
613void
614rde_dispatch_parent(int fd, short event, void *bula)
615{
616	static struct area	*narea;
617	struct area		*area;
618	struct iface		*iface, *ifp;
619	struct ifaddrchange	*ifc;
620	struct iface_addr	*ia, *nia;
621	struct imsg		 imsg;
622	struct kroute		 kr;
623	struct rroute		 rr;
624	struct imsgev		*iev = bula;
625	struct imsgbuf		*ibuf = &iev->ibuf;
626	struct lsa		*lsa;
627	struct vertex		*v;
628	struct rt_node		*rn;
629	ssize_t			 n;
630	int			 shut = 0, wasvalid;
631	unsigned int		 ifindex;
632
633	if (event & EV_READ) {
634		if ((n = imsg_read(ibuf)) == -1)
635			fatal("imsg_read error");
636		if (n == 0)	/* connection closed */
637			shut = 1;
638	}
639	if (event & EV_WRITE) {
640		if (msgbuf_write(&ibuf->w) == -1)
641			fatal("msgbuf_write");
642	}
643
644	for (;;) {
645		if ((n = imsg_get(ibuf, &imsg)) == -1)
646			fatal("rde_dispatch_parent: imsg_read error");
647		if (n == 0)
648			break;
649
650		switch (imsg.hdr.type) {
651		case IMSG_NETWORK_ADD:
652			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
653				log_warnx("rde_dispatch_parent: "
654				    "wrong imsg len");
655				break;
656			}
657			memcpy(&rr, imsg.data, sizeof(rr));
658
659			if ((lsa = rde_asext_get(&rr)) != NULL) {
660				v = lsa_find(NULL, lsa->hdr.type,
661				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
662
663				lsa_merge(nbrself, lsa, v);
664			}
665			break;
666		case IMSG_NETWORK_DEL:
667			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
668				log_warnx("rde_dispatch_parent: "
669				    "wrong imsg len");
670				break;
671			}
672			memcpy(&rr, imsg.data, sizeof(rr));
673
674			if ((lsa = rde_asext_put(&rr)) != NULL) {
675				v = lsa_find(NULL, lsa->hdr.type,
676				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
677
678				/*
679				 * if v == NULL no LSA is in the table and
680				 * nothing has to be done.
681				 */
682				if (v)
683					lsa_merge(nbrself, lsa, v);
684				else
685					free(lsa);
686			}
687			break;
688		case IMSG_KROUTE_GET:
689			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
690				log_warnx("rde_dispatch_parent: "
691				    "wrong imsg len");
692				break;
693			}
694			memcpy(&kr, imsg.data, sizeof(kr));
695
696			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
697			    DT_NET)) != NULL)
698				rde_send_change_kroute(rn);
699			else
700				/* should not happen */
701				imsg_compose_event(iev_main, IMSG_KROUTE_DELETE,
702				    0, 0, -1, &kr, sizeof(kr));
703			break;
704		case IMSG_IFINFO:
705			if (imsg.hdr.len != IMSG_HEADER_SIZE +
706			    sizeof(struct iface))
707				fatalx("IFINFO imsg with wrong len");
708
709			ifp = imsg.data;
710			iface = if_find(ifp->ifindex);
711			if (iface == NULL)
712				fatalx("interface lost in rde");
713
714			wasvalid = (iface->flags & IFF_UP) &&
715			    LINK_STATE_IS_UP(iface->linkstate);
716
717			if_update(iface, ifp->mtu, ifp->flags, ifp->media_type,
718			    ifp->linkstate, ifp->baudrate);
719
720			/* Resend LSAs if interface state changes. */
721			if (wasvalid != (iface->flags & IFF_UP) &&
722			    LINK_STATE_IS_UP(iface->linkstate)) {
723				area = area_find(rdeconf, iface->area_id);
724				if (!area)
725					fatalx("interface lost area");
726				orig_intra_area_prefix_lsas(area);
727			}
728			break;
729		case IMSG_IFADD:
730			if ((iface = malloc(sizeof(struct iface))) == NULL)
731				fatal(NULL);
732			memcpy(iface, imsg.data, sizeof(struct iface));
733
734			LIST_INIT(&iface->nbr_list);
735			TAILQ_INIT(&iface->ls_ack_list);
736			RB_INIT(&iface->lsa_tree);
737
738			area = area_find(rdeconf, iface->area_id);
739			LIST_INSERT_HEAD(&area->iface_list, iface, entry);
740			break;
741		case IMSG_IFDELETE:
742			if (imsg.hdr.len != IMSG_HEADER_SIZE +
743			    sizeof(ifindex))
744				fatalx("IFDELETE imsg with wrong len");
745
746			memcpy(&ifindex, imsg.data, sizeof(ifindex));
747			iface = if_find(ifindex);
748			if (iface == NULL)
749				fatalx("interface lost in rde");
750
751			LIST_REMOVE(iface, entry);
752			if_del(iface);
753			break;
754		case IMSG_IFADDRNEW:
755			if (imsg.hdr.len != IMSG_HEADER_SIZE +
756			    sizeof(struct ifaddrchange))
757				fatalx("IFADDRNEW imsg with wrong len");
758			ifc = imsg.data;
759
760			iface = if_find(ifc->ifindex);
761			if (iface == NULL)
762				fatalx("IFADDRNEW interface lost in rde");
763
764			if ((ia = calloc(1, sizeof(struct iface_addr))) ==
765			    NULL)
766				fatal("rde_dispatch_parent IFADDRNEW");
767			ia->addr = ifc->addr;
768			ia->dstbrd = ifc->dstbrd;
769			ia->prefixlen = ifc->prefixlen;
770
771			TAILQ_INSERT_TAIL(&iface->ifa_list, ia, entry);
772			area = area_find(rdeconf, iface->area_id);
773			if (area)
774				orig_intra_area_prefix_lsas(area);
775			break;
776		case IMSG_IFADDRDEL:
777			if (imsg.hdr.len != IMSG_HEADER_SIZE +
778			    sizeof(struct ifaddrchange))
779				fatalx("IFADDRDEL imsg with wrong len");
780			ifc = imsg.data;
781
782			iface = if_find(ifc->ifindex);
783			if (iface == NULL)
784				fatalx("IFADDRDEL interface lost in rde");
785
786			for (ia = TAILQ_FIRST(&iface->ifa_list); ia != NULL;
787			    ia = nia) {
788				nia = TAILQ_NEXT(ia, entry);
789
790				if (IN6_ARE_ADDR_EQUAL(&ia->addr,
791				    &ifc->addr)) {
792					TAILQ_REMOVE(&iface->ifa_list, ia,
793					    entry);
794					free(ia);
795					break;
796				}
797			}
798			area = area_find(rdeconf, iface->area_id);
799			if (area)
800				orig_intra_area_prefix_lsas(area);
801			break;
802		case IMSG_RECONF_CONF:
803			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
804			    NULL)
805				fatal(NULL);
806			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
807
808			LIST_INIT(&nconf->area_list);
809			LIST_INIT(&nconf->cand_list);
810			break;
811		case IMSG_RECONF_AREA:
812			if ((narea = area_new()) == NULL)
813				fatal(NULL);
814			memcpy(narea, imsg.data, sizeof(struct area));
815
816			LIST_INIT(&narea->iface_list);
817			LIST_INIT(&narea->nbr_list);
818			RB_INIT(&narea->lsa_tree);
819
820			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
821			break;
822		case IMSG_RECONF_END:
823			merge_config(rdeconf, nconf);
824			nconf = NULL;
825			break;
826		default:
827			log_debug("rde_dispatch_parent: unexpected imsg %d",
828			    imsg.hdr.type);
829			break;
830		}
831		imsg_free(&imsg);
832	}
833	if (!shut)
834		imsg_event_add(iev);
835	else {
836		/* this pipe is dead, so remove the event handler */
837		event_del(&iev->ev);
838		event_loopexit(NULL);
839	}
840}
841
/*
 * Dump one area's LSA database to a control client: first the area
 * header, then each interface with its link-scope LSA tree, then the
 * area-scope LSA tree.  pid identifies the requesting control process.
 */
void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}
861
/* Return this router's OSPF router ID as stored in the running config. */
u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}
867
868void
869rde_send_change_kroute(struct rt_node *r)
870{
871	struct kroute		 kr;
872	struct rt_nexthop	*rn;
873
874	TAILQ_FOREACH(rn, &r->nexthop, entry) {
875		if (!rn->invalid)
876			break;
877	}
878	if (!rn)
879		fatalx("rde_send_change_kroute: no valid nexthop found");
880
881	bzero(&kr, sizeof(kr));
882	kr.prefix = r->prefix;
883	kr.nexthop = rn->nexthop;
884	if (IN6_IS_ADDR_LINKLOCAL(&rn->nexthop) ||
885	    IN6_IS_ADDR_MC_LINKLOCAL(&rn->nexthop))
886		kr.scope = rn->ifindex;
887	kr.ifindex = rn->ifindex;
888	kr.prefixlen = r->prefixlen;
889	kr.ext_tag = r->ext_tag;
890
891	imsg_compose_event(iev_main, IMSG_KROUTE_CHANGE, 0, 0, -1,
892	    &kr, sizeof(kr));
893}
894
895void
896rde_send_delete_kroute(struct rt_node *r)
897{
898	struct kroute	 kr;
899
900	bzero(&kr, sizeof(kr));
901	kr.prefix = r->prefix;
902	kr.prefixlen = r->prefixlen;
903
904	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
905	    &kr, sizeof(kr));
906}
907
/*
 * Send global daemon statistics (router id, SPF timer settings, area
 * and AS-external LSA counts, uptime) to the control client identified
 * by pid.  The reply struct is static so it survives until the imsg
 * is written out.
 */
void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	/* count configured areas */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	/* count AS-external LSAs in the database */
	RB_FOREACH(v, lsa_tree, &asext_tree)
		sumctl.num_ext_lsa++;

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}
937
/*
 * Send per-area statistics (interface count, fully adjacent neighbor
 * count, LSA count, SPF runs) for one area to the control client
 * identified by pid.
 */
void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	/* count only real, fully adjacent neighbors (skip nbrself) */
	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree)
		sumareactl.num_lsa++;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}
965
966LIST_HEAD(rde_nbr_head, rde_nbr);
967
968struct nbr_table {
969	struct rde_nbr_head	*hashtbl;
970	u_int32_t		 hashmask;
971} rdenbrtable;
972
973#define RDE_NBR_HASH(x)		\
974	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
975
976void
977rde_nbr_init(u_int32_t hashsize)
978{
979	struct rde_nbr_head	*head;
980	u_int32_t		 hs, i;
981
982	for (hs = 1; hs < hashsize; hs <<= 1)
983		;
984	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
985	if (rdenbrtable.hashtbl == NULL)
986		fatal("rde_nbr_init");
987
988	for (i = 0; i < hs; i++)
989		LIST_INIT(&rdenbrtable.hashtbl[i]);
990
991	rdenbrtable.hashmask = hs - 1;
992
993	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
994		fatal("rde_nbr_init");
995
996	nbrself->id.s_addr = rde_router_id();
997	nbrself->peerid = NBR_IDSELF;
998	nbrself->state = NBR_STA_DOWN;
999	nbrself->self = 1;
1000	head = RDE_NBR_HASH(NBR_IDSELF);
1001	LIST_INSERT_HEAD(head, nbrself, hash);
1002}
1003
1004void
1005rde_nbr_free(void)
1006{
1007	free(nbrself);
1008	free(rdenbrtable.hashtbl);
1009}
1010
1011struct rde_nbr *
1012rde_nbr_find(u_int32_t peerid)
1013{
1014	struct rde_nbr_head	*head;
1015	struct rde_nbr		*nbr;
1016
1017	head = RDE_NBR_HASH(peerid);
1018
1019	LIST_FOREACH(nbr, head, hash) {
1020		if (nbr->peerid == peerid)
1021			return (nbr);
1022	}
1023
1024	return (NULL);
1025}
1026
1027struct rde_nbr *
1028rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
1029{
1030	struct rde_nbr_head	*head;
1031	struct rde_nbr		*nbr;
1032	struct area		*area;
1033	struct iface		*iface;
1034
1035	if (rde_nbr_find(peerid))
1036		return (NULL);
1037	if ((area = area_find(rdeconf, new->area_id)) == NULL)
1038		fatalx("rde_nbr_new: unknown area");
1039
1040	if ((iface = if_find(new->ifindex)) == NULL)
1041		fatalx("rde_nbr_new: unknown interface");
1042
1043	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
1044		fatal("rde_nbr_new");
1045
1046	memcpy(nbr, new, sizeof(*nbr));
1047	nbr->peerid = peerid;
1048	nbr->area = area;
1049	nbr->iface = iface;
1050
1051	TAILQ_INIT(&nbr->req_list);
1052
1053	head = RDE_NBR_HASH(peerid);
1054	LIST_INSERT_HEAD(head, nbr, hash);
1055	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
1056
1057	return (nbr);
1058}
1059
1060void
1061rde_nbr_del(struct rde_nbr *nbr)
1062{
1063	if (nbr == NULL)
1064		return;
1065
1066	rde_req_list_free(nbr);
1067
1068	LIST_REMOVE(nbr, entry);
1069	LIST_REMOVE(nbr, hash);
1070
1071	free(nbr);
1072}
1073
1074int
1075rde_nbr_loading(struct area *area)
1076{
1077	struct rde_nbr		*nbr;
1078	int			 checkall = 0;
1079
1080	if (area == NULL) {
1081		area = LIST_FIRST(&rdeconf->area_list);
1082		checkall = 1;
1083	}
1084
1085	while (area != NULL) {
1086		LIST_FOREACH(nbr, &area->nbr_list, entry) {
1087			if (nbr->self)
1088				continue;
1089			if (nbr->state & NBR_STA_XCHNG ||
1090			    nbr->state & NBR_STA_LOAD)
1091				return (1);
1092		}
1093		if (!checkall)
1094			break;
1095		area = LIST_NEXT(area, entry);
1096	}
1097
1098	return (0);
1099}
1100
1101struct rde_nbr *
1102rde_nbr_self(struct area *area)
1103{
1104	struct rde_nbr		*nbr;
1105
1106	LIST_FOREACH(nbr, &area->nbr_list, entry)
1107		if (nbr->self)
1108			return (nbr);
1109
1110	/* this may not happen */
1111	fatalx("rde_nbr_self: area without self");
1112	return (NULL);
1113}
1114
1115/*
1116 * LSA req list
1117 */
1118void
1119rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1120{
1121	struct rde_req_entry	*le;
1122
1123	if ((le = calloc(1, sizeof(*le))) == NULL)
1124		fatal("rde_req_list_add");
1125
1126	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1127	le->type = lsa->type;
1128	le->ls_id = lsa->ls_id;
1129	le->adv_rtr = lsa->adv_rtr;
1130}
1131
1132int
1133rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1134{
1135	struct rde_req_entry	*le;
1136
1137	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1138		if ((lsa_hdr->type == le->type) &&
1139		    (lsa_hdr->ls_id == le->ls_id) &&
1140		    (lsa_hdr->adv_rtr == le->adv_rtr))
1141			return (1);
1142	}
1143	return (0);
1144}
1145
1146void
1147rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1148{
1149	struct rde_req_entry	*le;
1150
1151	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1152		if ((lsa_hdr->type == le->type) &&
1153		    (lsa_hdr->ls_id == le->ls_id) &&
1154		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1155			TAILQ_REMOVE(&nbr->req_list, le, entry);
1156			free(le);
1157			return;
1158		}
1159	}
1160}
1161
1162void
1163rde_req_list_free(struct rde_nbr *nbr)
1164{
1165	struct rde_req_entry	*le;
1166
1167	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1168		TAILQ_REMOVE(&nbr->req_list, le, entry);
1169		free(le);
1170	}
1171}
1172
1173/*
1174 * as-external LSA handling
1175 */
/*
 * Build an as-external LSA for the redistributed kernel route rr, or
 * return NULL when the prefix exactly matches one of our configured
 * interface prefixes (it is then already announced via a prefix LSA).
 * The returned LSA (if any) is heap-allocated; the caller merges it.
 */
struct lsa *
rde_asext_get(struct rroute *rr)
{
	struct area		*area;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct in6_addr		 addr;

	/* scan every address configured on any interface in any area */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
				if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
					continue;

				/* same network and same prefix length? */
				inet6applymask(&addr, &ia->addr,
				    rr->kr.prefixlen);
				if (!memcmp(&addr, &rr->kr.prefix,
				    sizeof(addr)) && rr->kr.prefixlen ==
				    ia->prefixlen) {
					/* already announced as Prefix LSA */
					log_debug("rde_asext_get: %s/%d is "
					    "part of prefix LSA",
					    log_in6addr(&rr->kr.prefix),
					    rr->kr.prefixlen);
					return (NULL);
				}
			}

	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}
1207
1208struct lsa *
1209rde_asext_put(struct rroute *rr)
1210{
1211	/*
1212	 * just try to remove the LSA. If the prefix is announced as
1213	 * stub net LSA lsa_find() will fail later and nothing will happen.
1214	 */
1215
1216	/* remove by reflooding with MAX_AGE */
1217	return (orig_asext_lsa(rr, MAX_AGE));
1218}
1219
1220/*
1221 * summary LSA stuff
1222 */
1223void
1224rde_summary_update(struct rt_node *rte, struct area *area)
1225{
1226	struct vertex		*v = NULL;
1227//XXX	struct lsa		*lsa;
1228	u_int16_t		 type = 0;
1229
1230	/* first check if we actually need to announce this route */
1231	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1232		return;
1233	/* never create summaries for as-ext LSA */
1234	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1235		return;
1236	/* no need for summary LSA in the originating area */
1237	if (rte->area.s_addr == area->id.s_addr)
1238		return;
1239	/* no need to originate inter-area routes to the backbone */
1240	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1241		return;
1242	/* TODO nexthop check, nexthop part of area -> no summary */
1243	if (rte->cost >= LS_INFINITY)
1244		return;
1245	/* TODO AS border router specific checks */
1246	/* TODO inter-area network route stuff */
1247	/* TODO intra-area stuff -- condense LSA ??? */
1248
1249	if (rte->d_type == DT_NET) {
1250		type = LSA_TYPE_INTER_A_PREFIX;
1251	} else if (rte->d_type == DT_RTR) {
1252		type = LSA_TYPE_INTER_A_ROUTER;
1253	} else
1254
1255#if 0 /* XXX a lot todo */
1256	/* update lsa but only if it was changed */
1257	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1258	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1259	lsa_merge(rde_nbr_self(area), lsa, v);
1260
1261	if (v == NULL)
1262		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1263#endif
1264
1265	/* suppressed/deleted routes are not found in the second lsa_find */
1266	if (v)
1267		v->cost = rte->cost;
1268}
1269
1270/*
1271 * Functions for self-originated LSAs
1272 */
1273
1274/* Prefix LSAs have variable size. We have to be careful to copy the right
1275 * amount of bytes, and to realloc() the right amount of memory. */
1276void
1277append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1278{
1279	struct lsa_prefix	*copy;
1280	unsigned int		 lsa_prefix_len;
1281	unsigned int		 new_len;
1282	char			*new_lsa;
1283
1284	lsa_prefix_len = sizeof(struct lsa_prefix)
1285	    + LSA_PREFIXSIZE(prefix->prefixlen);
1286
1287	new_len = *len + lsa_prefix_len;
1288
1289	/* Make sure we have enough space for this prefix. */
1290	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1291		fatalx("append_prefix_lsa");
1292
1293	/* Append prefix to LSA. */
1294	copy = (struct lsa_prefix *)(new_lsa + *len);
1295	memcpy(copy, prefix, lsa_prefix_len);
1296	copy->metric = 0;
1297
1298	*lsa = (struct lsa *)new_lsa;
1299	*len = new_len;
1300}
1301
1302int
1303prefix_compare(struct prefix_node *a, struct prefix_node *b)
1304{
1305	struct lsa_prefix	*p;
1306	struct lsa_prefix	*q;
1307	int			 i;
1308	int			 len;
1309
1310	p = a->prefix;
1311	q = b->prefix;
1312
1313	len = MIN(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1314
1315	i = memcmp(p + 1, q + 1, len);
1316	if (i)
1317		return (i);
1318	if (p->prefixlen < q->prefixlen)
1319		return (-1);
1320	if (p->prefixlen > q->prefixlen)
1321		return (1);
1322	return (0);
1323}
1324
1325void
1326prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1327{
1328	struct prefix_node	*old;
1329	struct prefix_node	*new;
1330	struct in6_addr		 addr;
1331	unsigned int		 len;
1332	unsigned int		 i;
1333	char			*cur_prefix;
1334
1335	cur_prefix = (char *)(lsa + 1);
1336
1337	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1338		if ((new = calloc(1, sizeof(*new))) == NULL)
1339			fatal("prefix_tree_add");
1340		new->prefix = (struct lsa_prefix *)cur_prefix;
1341
1342		len = sizeof(*new->prefix)
1343		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1344
1345		bzero(&addr, sizeof(addr));
1346		memcpy(&addr, new->prefix + 1,
1347		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1348
1349		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1350		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1351		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1352			old = RB_INSERT(prefix_tree, tree, new);
1353			if (old != NULL) {
1354				old->prefix->options |= new->prefix->options;
1355				free(new);
1356			}
1357		}
1358
1359		cur_prefix = cur_prefix + len;
1360	}
1361}
1362
1363RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1364
/*
 * Build the per-link Intra-Area-Prefix LSA for a broadcast/NBMA
 * interface on which we are DR, merging the prefixes from our own and
 * all fully adjacent neighbors' Link LSAs.  Returns NULL when nothing
 * needs to be originated and no old copy has to be flushed; otherwise
 * the caller owns the returned LSA.
 */
struct lsa *
orig_intra_lsa_net(struct area *area, struct iface *iface, struct vertex *old)
{
	struct lsa		*lsa;
	struct vertex		*v;
	struct rde_nbr		*nbr;
	struct prefix_node	*node;
	struct prefix_tree	 tree;
	int			 num_full_nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	log_debug("orig_intra_lsa_net: area %s, interface %s",
	    inet_ntoa(area->id), iface->name);

	RB_INIT(&tree);

	if (iface->state & IF_STA_DR) {
		/* we are DR: collect prefixes from the Link LSAs of all
		 * fully adjacent neighbors on this interface */
		num_full_nbr = 0;
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self ||
			    nbr->iface->ifindex != iface->ifindex ||
			    (nbr->state & NBR_STA_FULL) == 0)
				continue;
			num_full_nbr++;
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(nbr->iface_id), nbr->id.s_addr);
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
		if (num_full_nbr == 0) {
			/* There are no adjacent neighbors on link.
			 * If a copy of this LSA already exists in DB,
			 * it needs to be flushed. orig_intra_lsa_rtr()
			 * will take care of prefixes configured on
			 * this interface. */
			if (!old)
				return NULL;
		} else {
			/* Add our own prefixes configured for this link. */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(iface->ifindex), rde_router_id());
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
	/* Continue only if a copy of this LSA already exists in DB.
	 * It needs to be flushed. */
	} else if (!old)
		return NULL;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_net");

	/* this LSA references the network LSA of the link */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
	lsa->data.pref_intra.ref_ls_id = htonl(iface->ifindex);
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	/* append the deduplicated prefixes, growing the LSA as we go */
	numprefix = 0;
	RB_FOREACH(node, prefix_tree, &tree) {
		append_prefix_lsa(&lsa, &len, node->prefix);
		numprefix++;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* tree nodes only point into the Link LSAs, free the nodes alone */
	while (!RB_EMPTY(&tree))
		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(iface->ifindex);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}
1446
/*
 * Build the router-wide Intra-Area-Prefix LSA (ls_id LS_ID_INTRA_RTR)
 * advertising prefixes of interfaces in this area that are not covered
 * by a per-link LSA from orig_intra_lsa_net().  Returns NULL if there
 * is nothing to announce and no old copy to flush; otherwise the
 * caller owns the returned LSA.
 */
struct lsa *
orig_intra_lsa_rtr(struct area *area, struct vertex *old)
{
	/* scratch buffer big enough for one prefix of maximum length */
	char			lsa_prefix_buf[sizeof(struct lsa_prefix)
				    + sizeof(struct in6_addr)];
	struct lsa		*lsa;
	struct lsa_prefix	*lsa_prefix;
	struct in6_addr		*prefix;
	struct iface		*iface;
	struct iface_addr	*ia;
	struct rde_nbr		*nbr;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_rtr");

	/* this LSA references our own router LSA */
	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
	lsa->data.pref_intra.ref_ls_id = 0;
	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();

	numprefix = 0;
	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (!((iface->flags & IFF_UP) &&
		    LINK_STATE_IS_UP(iface->linkstate)))
			/* interface or link state down */
			continue;
		if ((iface->state & IF_STA_DOWN) &&
		    !(iface->cflags & F_IFACE_PASSIVE))
			/* passive interfaces stay in state DOWN */
			continue;

		/* Broadcast links with adjacencies are handled
		 * by orig_intra_lsa_net(), ignore. */
		if (iface->type == IF_TYPE_BROADCAST ||
		    iface->type == IF_TYPE_NBMA) {
			if (iface->state & IF_STA_WAITING)
				/* Skip, we're still waiting for
				 * adjacencies to form. */
				continue;

			/* look for any full adjacency on this interface */
			LIST_FOREACH(nbr, &area->nbr_list, entry)
				if (!nbr->self &&
				    nbr->iface->ifindex == iface->ifindex &&
				    nbr->state & NBR_STA_FULL)
					break;
			if (nbr)
				/* found one: the per-link LSA covers it */
				continue;
		}

		lsa_prefix = (struct lsa_prefix *)lsa_prefix_buf;

		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
				continue;

			bzero(lsa_prefix_buf, sizeof(lsa_prefix_buf));

			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
			    iface->state & IF_STA_LOOPBACK) {
				/* announce the address itself (/128) */
				lsa_prefix->prefixlen = 128;
			} else {
				lsa_prefix->prefixlen = ia->prefixlen;
				lsa_prefix->metric = htons(iface->metric);
			}

			if (lsa_prefix->prefixlen == 128)
				lsa_prefix->options |= OSPF_PREFIX_LA;

			log_debug("orig_intra_lsa_rtr: area %s, interface %s: "
			    "%s/%d", inet_ntoa(area->id),
			    iface->name, log_in6addr(&ia->addr),
			    lsa_prefix->prefixlen);

			prefix = (struct in6_addr *)(lsa_prefix + 1);
			inet6applymask(prefix, &ia->addr,
			    lsa_prefix->prefixlen);
			append_prefix_lsa(&lsa, &len, lsa_prefix);
			numprefix++;
		}

		/* TODO: Add prefixes of directly attached hosts, too */
		/* TODO: Add prefixes for virtual links */
	}

	/* If no prefixes were included, continue only if a copy of this
	 * LSA already exists in DB. It needs to be flushed. */
	if (numprefix == 0 && !old) {
		free(lsa);
		return NULL;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
	lsa->hdr.adv_rtr = rde_router_id();
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}
1554
1555void
1556orig_intra_area_prefix_lsas(struct area *area)
1557{
1558	struct lsa	*lsa;
1559	struct vertex	*old;
1560	struct iface	*iface;
1561
1562	LIST_FOREACH(iface, &area->iface_list, entry) {
1563		if (iface->type == IF_TYPE_BROADCAST ||
1564		    iface->type == IF_TYPE_NBMA) {
1565			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1566			    htonl(iface->ifindex), rde_router_id());
1567			lsa = orig_intra_lsa_net(area, iface, old);
1568			if (lsa)
1569				lsa_merge(rde_nbr_self(area), lsa, old);
1570		}
1571	}
1572
1573	old = lsa_find_tree(&area->lsa_tree, htons(LSA_TYPE_INTRA_A_PREFIX),
1574		htonl(LS_ID_INTRA_RTR), rde_router_id());
1575	lsa = orig_intra_lsa_rtr(area, old);
1576	if (lsa)
1577		lsa_merge(rde_nbr_self(area), lsa, old);
1578}
1579
1580int
1581comp_asext(struct lsa *a, struct lsa *b)
1582{
1583	/* compare prefixes, if they are equal or not */
1584	if (a->data.asext.prefix.prefixlen != b->data.asext.prefix.prefixlen)
1585		return (-1);
1586	return (memcmp(
1587	    (char *)a + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1588	    (char *)b + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
1589	    LSA_PREFIXSIZE(a->data.asext.prefix.prefixlen)));
1590}
1591
/*
 * Build a self-originated as-external LSA for kernel route rr.
 * age is DEFAULT_AGE to announce or MAX_AGE to withdraw; on withdraw
 * the metric and external tag of the currently installed LSA are
 * copied back into rr so the flushed LSA matches what peers hold.
 * The caller owns the returned LSA.
 */
struct lsa *
orig_asext_lsa(struct rroute *rr, u_int16_t age)
{
	struct lsa	*lsa;
	u_int32_t	 ext_tag;
	u_int16_t	 len, ext_off;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext) +
	    LSA_PREFIXSIZE(rr->kr.prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we should announce the true nexthop
	 * unless that nexthop is outside of the ospf cloud.
	 * XXX for now we don't do this.
	 */

	/* the optional external route tag is appended after the prefix */
	ext_off = len;
	if (rr->kr.ext_tag) {
		len += sizeof(ext_tag);
	}
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.type = htons(LSA_TYPE_EXTERNAL);
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	lsa->data.asext.prefix.prefixlen = rr->kr.prefixlen;
	memcpy((char *)lsa + sizeof(struct lsa_hdr) + sizeof(struct lsa_asext),
	    &rr->kr.prefix, LSA_PREFIXSIZE(rr->kr.prefixlen));

	/* pick a unique ls_id for this prefix among our as-ext LSAs */
	lsa->hdr.ls_id = lsa_find_lsid(&asext_tree, lsa->hdr.type,
	    lsa->hdr.adv_rtr, comp_asext, lsa);

	if (age == MAX_AGE) {
		/* inherit metric and ext_tag from the current LSA,
		 * some routers don't like to get withdraws that are
		 * different from what they have in their table.
		 */
		struct vertex *v;
		v = lsa_find(NULL, lsa->hdr.type, lsa->hdr.ls_id,
		    lsa->hdr.adv_rtr);
		if (v != NULL) {
			rr->metric = ntohl(v->lsa->data.asext.metric);
			if (rr->metric & LSA_ASEXT_T_FLAG) {
				/* T-flag set: installed LSA carries a tag */
				memcpy(&ext_tag, (char *)v->lsa + ext_off,
				    sizeof(ext_tag));
				rr->kr.ext_tag = ntohl(ext_tag);
			}
			rr->metric &= LSA_METRIC_MASK;
		}
	}

	if (rr->kr.ext_tag) {
		/* T-flag in the metric word signals the appended tag */
		lsa->data.asext.metric = htonl(rr->metric | LSA_ASEXT_T_FLAG);
		ext_tag = htonl(rr->kr.ext_tag);
		memcpy((char *)lsa + ext_off, &ext_tag, sizeof(ext_tag));
	} else {
		lsa->data.asext.metric = htonl(rr->metric);
	}

	/* checksum over the finished LSA, with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1666
/*
 * Originate a summary LSA for an inter-area route.  The implementation
 * is disabled (#if 0) and the function currently always returns NULL;
 * rte, area, type and invalid are unused until the code is finished.
 */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	return NULL;
}
1707