/*	$OpenBSD: rde.c,v 1.22 2009/02/19 22:17:07 stsp Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
21#include <sys/types.h>
22#include <sys/socket.h>
23#include <sys/queue.h>
24#include <sys/param.h>
25#include <netinet/in.h>
26#include <arpa/inet.h>
27#include <err.h>
28#include <errno.h>
29#include <stdlib.h>
30#include <signal.h>
31#include <string.h>
32#include <pwd.h>
33#include <unistd.h>
34#include <event.h>
35
36#include "ospf6.h"
37#include "ospf6d.h"
38#include "ospfe.h"
39#include "log.h"
40#include "rde.h"
41
/* internal prototypes: signal handling, imsg dispatch, dump helpers */
void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

/* summary reporting and neighbor table management */
void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

/* per-neighbor list of outstanding LSA requests */
void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

/* AS-external LSA origination and withdrawal */
struct lsa	*rde_asext_get(struct rroute *);
struct lsa	*rde_asext_put(struct rroute *);

/* construction of self-originated LSAs */
struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
struct lsa	*orig_intra_lsa_net(struct iface *, struct vertex *);
struct lsa	*orig_intra_lsa_rtr(struct area *, struct vertex *);
void		 orig_intra_area_prefix_lsas(struct area *);
void		 append_prefix_lsa(struct lsa **, u_int16_t *,
		    struct lsa_prefix *);
int		 link_lsa_from_full_nbr(struct lsa *, struct iface *);

/* A 32-bit value != any ifindex.
 * We assume ifindex is bound by [1, USHRT_MAX] inclusive. */
#define	LS_ID_INTRA_RTR	0x01000000

/* Tree of prefixes with global scope on given a link,
 * see orig_intra_lsa_*() */
struct prefix_node {
	RB_ENTRY(prefix_node)	 entry;		/* tree linkage */
	struct lsa_prefix	*prefix;	/* points into an LSA; nodes are
						 * freed without freeing this */
};
RB_HEAD(prefix_tree, prefix_node);
RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
int		 prefix_compare(struct prefix_node *, struct prefix_node *);
void		 prefix_tree_add(struct prefix_tree *, struct lsa_link *);

/* global RDE state */
struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;	/* running / pending config */
struct imsgbuf		*ibuf_ospfe;	/* pipe to the OSPF engine process */
struct imsgbuf		*ibuf_main;	/* pipe to the parent process */
struct rde_nbr		*nbrself;	/* pseudo neighbor for self-originated LSAs */
struct lsa_tree		 asext_tree;	/* database of AS-external LSAs */
93
/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	if (sig != SIGINT && sig != SIGTERM)
		fatalx("unexpected signal");

	rde_shutdown();
	/* NOTREACHED */
}
111
/*
 * Route decision engine entry point.  Forks a child that chroots,
 * drops privileges, wires up the imsg pipes to the parent and the
 * OSPF engine and then runs the libevent loop.  Returns the child's
 * pid to the calling (parent) process.
 */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	/* run in a child; the parent returns immediately with its pid */
	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	/* chroot into the unprivileged user's home directory */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	/* irrevocably drop group and user privileges */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: keep only the RDE ends, close all others */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);

	/* setup event handler */
	ibuf_ospfe->events = EV_READ;
	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
	    ibuf_ospfe->handler, ibuf_ospfe);
	event_add(&ibuf_ospfe->ev, NULL);

	ibuf_main->events = EV_READ;
	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
	    ibuf_main->handler, ibuf_main);
	event_add(&ibuf_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* the redistribute list is only needed in the parent process */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
205
/*
 * Tear down all RDE state and exit the process.  Called from the
 * signal handler and after the event loop terminates; never returns.
 */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	/* free all areas together with their attached state */
	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	/* discard queued imsgs and release the buffers */
	msgbuf_clear(&ibuf_ospfe->w);
	free(ibuf_ospfe);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
230
231int
232rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
233    u_int16_t datalen)
234{
235	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
236}
237
238/* ARGSUSED */
239void
240rde_dispatch_imsg(int fd, short event, void *bula)
241{
242	struct imsgbuf		*ibuf = bula;
243	struct imsg		 imsg;
244	struct in_addr		 aid;
245	struct ls_req_hdr	 req_hdr;
246	struct lsa_hdr		 lsa_hdr, *db_hdr;
247	struct rde_nbr		 rn, *nbr;
248	struct timespec		 tp;
249	struct lsa		*lsa;
250	struct area		*area;
251	struct vertex		*v;
252	struct iface		*iface, *ifp;
253	char			*buf;
254	ssize_t			 n;
255	time_t			 now;
256	int			 r, state, self, shut = 0;
257	u_int16_t		 l;
258
259	switch (event) {
260	case EV_READ:
261		if ((n = imsg_read(ibuf)) == -1)
262			fatal("imsg_read error");
263		if (n == 0)	/* connection closed */
264			shut = 1;
265		break;
266	case EV_WRITE:
267		if (msgbuf_write(&ibuf->w) == -1)
268			fatal("msgbuf_write");
269		imsg_event_add(ibuf);
270		return;
271	default:
272		fatalx("unknown event");
273	}
274
275	clock_gettime(CLOCK_MONOTONIC, &tp);
276	now = tp.tv_sec;
277
278	for (;;) {
279		if ((n = imsg_get(ibuf, &imsg)) == -1)
280			fatal("rde_dispatch_imsg: imsg_read error");
281		if (n == 0)
282			break;
283
284		switch (imsg.hdr.type) {
285		case IMSG_NEIGHBOR_UP:
286			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
287				fatalx("invalid size of OE request");
288			memcpy(&rn, imsg.data, sizeof(rn));
289
290			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
291				fatalx("rde_dispatch_imsg: "
292				    "neighbor already exists");
293			break;
294		case IMSG_NEIGHBOR_DOWN:
295			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
296			break;
297		case IMSG_NEIGHBOR_CHANGE:
298			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
299				fatalx("invalid size of OE request");
300			memcpy(&state, imsg.data, sizeof(state));
301
302			nbr = rde_nbr_find(imsg.hdr.peerid);
303			if (nbr == NULL)
304				break;
305
306			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
307			    state & NBR_STA_FULL))
308				area_track(nbr->area, state);
309
310			nbr->state = state;
311			if (nbr->state & NBR_STA_FULL)
312				rde_req_list_free(nbr);
313			break;
314		case IMSG_DB_SNAPSHOT:
315			nbr = rde_nbr_find(imsg.hdr.peerid);
316			if (nbr == NULL)
317				break;
318
319			lsa_snap(nbr, imsg.hdr.peerid);
320
321			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
322			    0, NULL, 0);
323			break;
324		case IMSG_DD:
325			nbr = rde_nbr_find(imsg.hdr.peerid);
326			if (nbr == NULL)
327				break;
328
329			buf = imsg.data;
330			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
331			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
332				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
333				buf += sizeof(lsa_hdr);
334
335				v = lsa_find(nbr->iface, lsa_hdr.type,
336				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
337				if (v == NULL)
338					db_hdr = NULL;
339				else
340					db_hdr = &v->lsa->hdr;
341
342				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
343					/*
344					 * only request LSAs that are
345					 * newer or missing
346					 */
347					rde_req_list_add(nbr, &lsa_hdr);
348					imsg_compose(ibuf_ospfe, IMSG_DD,
349					    imsg.hdr.peerid, 0, &lsa_hdr,
350					    sizeof(lsa_hdr));
351				}
352			}
353			if (l != 0)
354				log_warnx("rde_dispatch_imsg: peerid %lu, "
355				    "trailing garbage in Database Description "
356				    "packet", imsg.hdr.peerid);
357
358			imsg_compose(ibuf_ospfe, IMSG_DD_END, imsg.hdr.peerid,
359			    0, NULL, 0);
360			break;
361		case IMSG_LS_REQ:
362			nbr = rde_nbr_find(imsg.hdr.peerid);
363			if (nbr == NULL)
364				break;
365
366			buf = imsg.data;
367			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
368			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
369				memcpy(&req_hdr, buf, sizeof(req_hdr));
370				buf += sizeof(req_hdr);
371
372				if ((v = lsa_find(nbr->iface,
373				    req_hdr.type, req_hdr.ls_id,
374				    req_hdr.adv_rtr)) == NULL) {
375					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
376					    imsg.hdr.peerid, 0, NULL, 0);
377					continue;
378				}
379				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
380				    imsg.hdr.peerid, 0, v->lsa,
381				    ntohs(v->lsa->hdr.len));
382			}
383			if (l != 0)
384				log_warnx("rde_dispatch_imsg: peerid %lu, "
385				    "trailing garbage in LS Request "
386				    "packet", imsg.hdr.peerid);
387			break;
388		case IMSG_LS_UPD:
389			nbr = rde_nbr_find(imsg.hdr.peerid);
390			if (nbr == NULL)
391				break;
392
393			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
394			if (lsa == NULL)
395				fatal(NULL);
396			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
397
398			if (!lsa_check(nbr, lsa,
399			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
400				free(lsa);
401				break;
402			}
403
404			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
405				    lsa->hdr.adv_rtr);
406			if (v == NULL)
407				db_hdr = NULL;
408			else
409				db_hdr = &v->lsa->hdr;
410
411			if (nbr->self) {
412				lsa_merge(nbr, lsa, v);
413				/* lsa_merge frees the right lsa */
414				break;
415			}
416
417			r = lsa_newer(&lsa->hdr, db_hdr);
418			if (r > 0) {
419				/* new LSA newer than DB */
420				if (v && v->flooded &&
421				    v->changed + MIN_LS_ARRIVAL >= now) {
422					free(lsa);
423					break;
424				}
425
426				rde_req_list_del(nbr, &lsa->hdr);
427
428				if (!(self = lsa_self(nbr, lsa, v)))
429					if (lsa_add(nbr, lsa))
430						/* delayed lsa */
431						break;
432
433				/* flood and perhaps ack LSA */
434				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
435				    imsg.hdr.peerid, 0, lsa,
436				    ntohs(lsa->hdr.len));
437
438				/* reflood self originated LSA */
439				if (self && v)
440					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
441					    v->peerid, 0, v->lsa,
442					    ntohs(v->lsa->hdr.len));
443				/* lsa not added so free it */
444				if (self)
445					free(lsa);
446			} else if (r < 0) {
447				/* lsa no longer needed */
448				free(lsa);
449
450				/*
451				 * point 6 of "The Flooding Procedure"
452				 * We are violating the RFC here because
453				 * it does not make sense to reset a session
454				 * because an equal LSA is already in the table.
455				 * Only if the LSA sent is older than the one
456				 * in the table we should reset the session.
457				 */
458				if (rde_req_list_exists(nbr, &lsa->hdr)) {
459					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
460					    imsg.hdr.peerid, 0, NULL, 0);
461					break;
462				}
463
464				/* new LSA older than DB */
465				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
466				    ntohs(db_hdr->age) == MAX_AGE)
467					/* seq-num wrap */
468					break;
469
470				if (v->changed + MIN_LS_ARRIVAL >= now)
471					break;
472
473				/* directly send current LSA, no ack */
474				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
475				    imsg.hdr.peerid, 0, v->lsa,
476				    ntohs(v->lsa->hdr.len));
477			} else {
478				/* LSA equal send direct ack */
479				imsg_compose(ibuf_ospfe, IMSG_LS_ACK,
480				    imsg.hdr.peerid, 0, &lsa->hdr,
481				    sizeof(lsa->hdr));
482				free(lsa);
483			}
484			break;
485		case IMSG_LS_MAXAGE:
486			nbr = rde_nbr_find(imsg.hdr.peerid);
487			if (nbr == NULL)
488				break;
489
490			if (imsg.hdr.len != IMSG_HEADER_SIZE +
491			    sizeof(struct lsa_hdr))
492				fatalx("invalid size of OE request");
493			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
494
495			if (rde_nbr_loading(nbr->area))
496				break;
497
498			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
499				    lsa_hdr.adv_rtr);
500			if (v == NULL)
501				db_hdr = NULL;
502			else
503				db_hdr = &v->lsa->hdr;
504
505			/*
506			 * only delete LSA if the one in the db is not newer
507			 */
508			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
509				lsa_del(nbr, &lsa_hdr);
510			break;
511		case IMSG_CTL_SHOW_DATABASE:
512		case IMSG_CTL_SHOW_DB_EXT:
513		case IMSG_CTL_SHOW_DB_LINK:
514		case IMSG_CTL_SHOW_DB_NET:
515		case IMSG_CTL_SHOW_DB_RTR:
516		case IMSG_CTL_SHOW_DB_INTRA:
517		case IMSG_CTL_SHOW_DB_SELF:
518		case IMSG_CTL_SHOW_DB_SUM:
519		case IMSG_CTL_SHOW_DB_ASBR:
520			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
521			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
522				log_warnx("rde_dispatch_imsg: wrong imsg len");
523				break;
524			}
525			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
526				LIST_FOREACH(area, &rdeconf->area_list, entry) {
527					rde_dump_area(area, imsg.hdr.type,
528					    imsg.hdr.pid);
529				}
530				lsa_dump(&asext_tree, imsg.hdr.type,
531				    imsg.hdr.pid);
532			} else {
533				memcpy(&aid, imsg.data, sizeof(aid));
534				if ((area = area_find(rdeconf, aid)) != NULL) {
535					rde_dump_area(area, imsg.hdr.type,
536					    imsg.hdr.pid);
537					if (!area->stub)
538						lsa_dump(&asext_tree,
539						    imsg.hdr.type,
540						    imsg.hdr.pid);
541				}
542			}
543			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
544			    NULL, 0);
545			break;
546		case IMSG_CTL_SHOW_RIB:
547			LIST_FOREACH(area, &rdeconf->area_list, entry) {
548				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
549				    0, imsg.hdr.pid, area, sizeof(*area));
550
551				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
552				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
553			}
554			aid.s_addr = 0;
555			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
556
557			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
558			    NULL, 0);
559			break;
560		case IMSG_CTL_SHOW_SUM:
561			rde_send_summary(imsg.hdr.pid);
562			LIST_FOREACH(area, &rdeconf->area_list, entry)
563				rde_send_summary_area(area, imsg.hdr.pid);
564			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
565			    NULL, 0);
566			break;
567		case IMSG_IFINFO:
568			if (imsg.hdr.len != IMSG_HEADER_SIZE +
569			    sizeof(struct iface))
570				fatalx("IFINFO imsg with wrong len");
571
572			ifp = imsg.data;
573
574			iface = if_find(ifp->ifindex);
575			if (iface == NULL)
576				fatalx("interface lost in rde");
577			iface->flags = ifp->flags;
578			iface->linkstate = ifp->linkstate;
579			iface->nh_reachable = ifp->nh_reachable;
580			iface->state = ifp->state;
581			break;
582		default:
583			log_debug("rde_dispatch_imsg: unexpected imsg %d",
584			    imsg.hdr.type);
585			break;
586		}
587		imsg_free(&imsg);
588	}
589	if (!shut)
590		imsg_event_add(ibuf);
591	else {
592		/* this pipe is dead, so remove the event handler */
593		event_del(&ibuf->ev);
594		event_loopexit(NULL);
595	}
596}
597
598/* ARGSUSED */
599void
600rde_dispatch_parent(int fd, short event, void *bula)
601{
602	static struct area	*narea;
603	struct iface		*niface, *iface;
604	struct imsg		 imsg;
605	struct kroute		 kr;
606	struct rroute		 rr;
607	struct imsgbuf		*ibuf = bula;
608	struct lsa		*lsa;
609	struct vertex		*v;
610	struct rt_node		*rn;
611	ssize_t			 n;
612	int			 shut = 0;
613	unsigned int		 ifindex;
614
615	switch (event) {
616	case EV_READ:
617		if ((n = imsg_read(ibuf)) == -1)
618			fatal("imsg_read error");
619		if (n == 0)	/* connection closed */
620			shut = 1;
621		break;
622	case EV_WRITE:
623		if (msgbuf_write(&ibuf->w) == -1)
624			fatal("msgbuf_write");
625		imsg_event_add(ibuf);
626		return;
627	default:
628		fatalx("unknown event");
629	}
630
631	for (;;) {
632		if ((n = imsg_get(ibuf, &imsg)) == -1)
633			fatal("rde_dispatch_parent: imsg_read error");
634		if (n == 0)
635			break;
636
637		switch (imsg.hdr.type) {
638		case IMSG_NETWORK_ADD:
639			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
640				log_warnx("rde_dispatch_parent: "
641				    "wrong imsg len");
642				break;
643			}
644			memcpy(&rr, imsg.data, sizeof(rr));
645
646			if ((lsa = rde_asext_get(&rr)) != NULL) {
647				v = lsa_find(NULL, lsa->hdr.type,
648				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
649
650				lsa_merge(nbrself, lsa, v);
651			}
652			break;
653		case IMSG_NETWORK_DEL:
654			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
655				log_warnx("rde_dispatch_parent: "
656				    "wrong imsg len");
657				break;
658			}
659			memcpy(&rr, imsg.data, sizeof(rr));
660
661			if ((lsa = rde_asext_put(&rr)) != NULL) {
662				v = lsa_find(NULL, lsa->hdr.type,
663				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
664
665				/*
666				 * if v == NULL no LSA is in the table and
667				 * nothing has to be done.
668				 */
669				if (v)
670					lsa_merge(nbrself, lsa, v);
671			}
672			break;
673		case IMSG_KROUTE_GET:
674			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
675				log_warnx("rde_dispatch_parent: "
676				    "wrong imsg len");
677				break;
678			}
679			memcpy(&kr, imsg.data, sizeof(kr));
680
681			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
682			    DT_NET)) != NULL)
683				rde_send_change_kroute(rn);
684			else
685				/* should not happen */
686				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
687				    0, &kr, sizeof(kr));
688			break;
689		case IMSG_IFADD:
690			if ((niface = malloc(sizeof(struct iface))) == NULL)
691				fatal(NULL);
692			memcpy(niface, imsg.data, sizeof(struct iface));
693
694			LIST_INIT(&niface->nbr_list);
695			TAILQ_INIT(&niface->ls_ack_list);
696			RB_INIT(&niface->lsa_tree);
697
698			narea = area_find(rdeconf, niface->area_id);
699			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
700			break;
701		case IMSG_IFDELETE:
702			if (imsg.hdr.len != IMSG_HEADER_SIZE +
703			    sizeof(ifindex))
704				fatalx("IFDELETE imsg with wrong len");
705
706			memcpy(&ifindex, imsg.data, sizeof(ifindex));
707			iface = if_find(ifindex);
708			if (iface == NULL)
709				fatalx("interface lost in ospfe");
710
711			LIST_REMOVE(iface, entry);
712			if_del(iface);
713			break;
714		case IMSG_RECONF_CONF:
715			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
716			    NULL)
717				fatal(NULL);
718			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
719
720			LIST_INIT(&nconf->area_list);
721			LIST_INIT(&nconf->cand_list);
722			break;
723		case IMSG_RECONF_AREA:
724			if ((narea = area_new()) == NULL)
725				fatal(NULL);
726			memcpy(narea, imsg.data, sizeof(struct area));
727
728			LIST_INIT(&narea->iface_list);
729			LIST_INIT(&narea->nbr_list);
730			RB_INIT(&narea->lsa_tree);
731
732			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
733			break;
734		case IMSG_RECONF_END:
735			merge_config(rdeconf, nconf);
736			nconf = NULL;
737			break;
738		default:
739			log_debug("rde_dispatch_parent: unexpected imsg %d",
740			    imsg.hdr.type);
741			break;
742		}
743		imsg_free(&imsg);
744	}
745	if (!shut)
746		imsg_event_add(ibuf);
747	else {
748		/* this pipe is dead, so remove the event handler */
749		event_del(&ibuf->ev);
750		event_loopexit(NULL);
751	}
752}
753
/*
 * Dump one area to the control client identified by pid: the area
 * header, each interface together with its link-local scoped LSAs,
 * then the area-scoped LSAs.  imsg_type selects which LSA types
 * lsa_dump() emits.
 */
void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose(ibuf_ospfe, IMSG_CTL_AREA, 0, pid, area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose(ibuf_ospfe, IMSG_CTL_IFACE,
		    0, pid, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}
772
/* Return the configured OSPF router ID. */
u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}
778
779void
780rde_send_change_kroute(struct rt_node *r)
781{
782	struct kroute		 kr;
783	struct rt_nexthop	*rn;
784
785	TAILQ_FOREACH(rn, &r->nexthop, entry) {
786		if (!rn->invalid)
787			break;
788	}
789	if (!rn)
790		fatalx("rde_send_change_kroute: no valid nexthop found");
791
792	bzero(&kr, sizeof(kr));
793	kr.prefix = r->prefix;
794	kr.nexthop = rn->nexthop;
795	kr.prefixlen = r->prefixlen;
796	kr.ext_tag = r->ext_tag;
797
798	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
799}
800
801void
802rde_send_delete_kroute(struct rt_node *r)
803{
804	struct kroute	 kr;
805
806	bzero(&kr, sizeof(kr));
807	kr.prefix = r->prefix;
808	kr.prefixlen = r->prefixlen;
809
810	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
811}
812
813void
814rde_send_summary(pid_t pid)
815{
816	static struct ctl_sum	 sumctl;
817	struct timeval		 now;
818	struct area		*area;
819	struct vertex		*v;
820
821	bzero(&sumctl, sizeof(struct ctl_sum));
822
823	sumctl.rtr_id.s_addr = rde_router_id();
824	sumctl.spf_delay = rdeconf->spf_delay;
825	sumctl.spf_hold_time = rdeconf->spf_hold_time;
826
827	LIST_FOREACH(area, &rdeconf->area_list, entry)
828		sumctl.num_area++;
829
830	RB_FOREACH(v, lsa_tree, &asext_tree)
831		sumctl.num_ext_lsa++;
832
833	gettimeofday(&now, NULL);
834	if (rdeconf->uptime < now.tv_sec)
835		sumctl.uptime = now.tv_sec - rdeconf->uptime;
836	else
837		sumctl.uptime = 0;
838
839	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
840	    sizeof(sumctl));
841}
842
843void
844rde_send_summary_area(struct area *area, pid_t pid)
845{
846	static struct ctl_sum_area	 sumareactl;
847	struct iface			*iface;
848	struct rde_nbr			*nbr;
849	struct lsa_tree			*tree = &area->lsa_tree;
850	struct vertex			*v;
851
852	bzero(&sumareactl, sizeof(struct ctl_sum_area));
853
854	sumareactl.area.s_addr = area->id.s_addr;
855	sumareactl.num_spf_calc = area->num_spf_calc;
856
857	LIST_FOREACH(iface, &area->iface_list, entry)
858		sumareactl.num_iface++;
859
860	LIST_FOREACH(nbr, &area->nbr_list, entry)
861		if (nbr->state == NBR_STA_FULL && !nbr->self)
862			sumareactl.num_adj_nbr++;
863
864	RB_FOREACH(v, lsa_tree, tree)
865		sumareactl.num_lsa++;
866
867	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
868	    sizeof(sumareactl));
869}
870
/* neighbor hash table, keyed by the peerid assigned by the OSPF engine */
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;	/* array of hash buckets */
	u_int32_t		 hashmask;	/* bucket count - 1; count is a power of two */
} rdenbrtable;

/* map a peerid to the head of its hash bucket */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
880
881void
882rde_nbr_init(u_int32_t hashsize)
883{
884	struct rde_nbr_head	*head;
885	u_int32_t		 hs, i;
886
887	for (hs = 1; hs < hashsize; hs <<= 1)
888		;
889	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
890	if (rdenbrtable.hashtbl == NULL)
891		fatal("rde_nbr_init");
892
893	for (i = 0; i < hs; i++)
894		LIST_INIT(&rdenbrtable.hashtbl[i]);
895
896	rdenbrtable.hashmask = hs - 1;
897
898	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
899		fatal("rde_nbr_init");
900
901	nbrself->id.s_addr = rde_router_id();
902	nbrself->peerid = NBR_IDSELF;
903	nbrself->state = NBR_STA_DOWN;
904	nbrself->self = 1;
905	head = RDE_NBR_HASH(NBR_IDSELF);
906	LIST_INSERT_HEAD(head, nbrself, hash);
907}
908
909void
910rde_nbr_free(void)
911{
912	free(nbrself);
913	free(rdenbrtable.hashtbl);
914}
915
916struct rde_nbr *
917rde_nbr_find(u_int32_t peerid)
918{
919	struct rde_nbr_head	*head;
920	struct rde_nbr		*nbr;
921
922	head = RDE_NBR_HASH(peerid);
923
924	LIST_FOREACH(nbr, head, hash) {
925		if (nbr->peerid == peerid)
926			return (nbr);
927	}
928
929	return (NULL);
930}
931
932struct rde_nbr *
933rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
934{
935	struct rde_nbr_head	*head;
936	struct rde_nbr		*nbr;
937	struct area		*area;
938	struct iface		*iface;
939
940	if (rde_nbr_find(peerid))
941		return (NULL);
942	if ((area = area_find(rdeconf, new->area_id)) == NULL)
943		fatalx("rde_nbr_new: unknown area");
944
945	LIST_FOREACH(iface, &area->iface_list, entry) {
946		if (iface->ifindex == new->ifindex)
947			break;
948	}
949	if (iface == NULL)
950		fatalx("rde_nbr_new: unknown interface");
951
952	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
953		fatal("rde_nbr_new");
954
955	memcpy(nbr, new, sizeof(*nbr));
956	nbr->peerid = peerid;
957	nbr->area = area;
958	nbr->iface = iface;
959
960	TAILQ_INIT(&nbr->req_list);
961
962	head = RDE_NBR_HASH(peerid);
963	LIST_INSERT_HEAD(head, nbr, hash);
964	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
965
966	return (nbr);
967}
968
969void
970rde_nbr_del(struct rde_nbr *nbr)
971{
972	if (nbr == NULL)
973		return;
974
975	rde_req_list_free(nbr);
976
977	LIST_REMOVE(nbr, entry);
978	LIST_REMOVE(nbr, hash);
979
980	free(nbr);
981}
982
983int
984rde_nbr_loading(struct area *area)
985{
986	struct rde_nbr		*nbr;
987	int			 checkall = 0;
988
989	if (area == NULL) {
990		area = LIST_FIRST(&rdeconf->area_list);
991		checkall = 1;
992	}
993
994	while (area != NULL) {
995		LIST_FOREACH(nbr, &area->nbr_list, entry) {
996			if (nbr->self)
997				continue;
998			if (nbr->state & NBR_STA_XCHNG ||
999			    nbr->state & NBR_STA_LOAD)
1000				return (1);
1001		}
1002		if (!checkall)
1003			break;
1004		area = LIST_NEXT(area, entry);
1005	}
1006
1007	return (0);
1008}
1009
1010struct rde_nbr *
1011rde_nbr_self(struct area *area)
1012{
1013	struct rde_nbr		*nbr;
1014
1015	LIST_FOREACH(nbr, &area->nbr_list, entry)
1016		if (nbr->self)
1017			return (nbr);
1018
1019	/* this may not happen */
1020	fatalx("rde_nbr_self: area without self");
1021	return (NULL);
1022}
1023
1024/*
1025 * LSA req list
1026 */
1027void
1028rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
1029{
1030	struct rde_req_entry	*le;
1031
1032	if ((le = calloc(1, sizeof(*le))) == NULL)
1033		fatal("rde_req_list_add");
1034
1035	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
1036	le->type = lsa->type;
1037	le->ls_id = lsa->ls_id;
1038	le->adv_rtr = lsa->adv_rtr;
1039}
1040
1041int
1042rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1043{
1044	struct rde_req_entry	*le;
1045
1046	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1047		if ((lsa_hdr->type == le->type) &&
1048		    (lsa_hdr->ls_id == le->ls_id) &&
1049		    (lsa_hdr->adv_rtr == le->adv_rtr))
1050			return (1);
1051	}
1052	return (0);
1053}
1054
1055void
1056rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1057{
1058	struct rde_req_entry	*le;
1059
1060	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1061		if ((lsa_hdr->type == le->type) &&
1062		    (lsa_hdr->ls_id == le->ls_id) &&
1063		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1064			TAILQ_REMOVE(&nbr->req_list, le, entry);
1065			free(le);
1066			return;
1067		}
1068	}
1069}
1070
1071void
1072rde_req_list_free(struct rde_nbr *nbr)
1073{
1074	struct rde_req_entry	*le;
1075
1076	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1077		TAILQ_REMOVE(&nbr->req_list, le, entry);
1078		free(le);
1079	}
1080}
1081
1082/*
1083 * as-external LSA handling
1084 */
/*
 * Build an AS-external LSA announcing the redistributed route rr.
 * The suppression check for prefixes already announced as net LSAs
 * is disabled (#if 0) -- presumably pending the IPv6 port; confirm
 * before re-enabling.
 */
struct lsa *
rde_asext_get(struct rroute *rr)
{
#if 0
	struct area	*area;
	struct iface	*iface;
XXX
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
			    prefixlen2mask(rr->kr.prefixlen)) {
				/* already announced as (stub) net LSA */
				log_debug("rde_asext_get: %s/%d is net LSA",
				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
				return (NULL);
			}
		}
#endif
	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}
1107
/*
 * Withdraw the AS-external LSA for the no-longer-redistributed route
 * rr by re-originating it with MAX_AGE.
 */
struct lsa *
rde_asext_put(struct rroute *rr)
{
	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA lsa_find() will fail later and nothing will happen.
	 */

	/* remove by reflooding with MAX_AGE */
	return (orig_asext_lsa(rr, MAX_AGE));
}
1119
1120/*
1121 * summary LSA stuff
1122 */
/*
 * Announce or update an inter-area summary for rte into area.
 * Largely a stub: the actual LSA origination is disabled (#if 0)
 * pending the port from ospfd; only the filtering checks run.
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct vertex		*v = NULL;
//XXX	struct lsa		*lsa;
	u_int16_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* TODO nexthop check, nexthop part of area -> no summary */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_INTER_A_PREFIX;
	} else if (rte->d_type == DT_RTR) {
		type = LSA_TYPE_INTER_A_ROUTER;
	} else
	/* NOTE(review): with the block below compiled out, this dangling
	 * "else" binds to the trailing "if (v)" statement.  Harmless while
	 * v stays NULL, but fragile -- restructure when re-enabling. */

#if 0 /* XXX a lot todo */
	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
#endif

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}
1169
1170/*
1171 * Functions for self-originated LSAs
1172 */
1173
/*
 * Build the "network" intra-area-prefix LSA for a broadcast/NBMA
 * interface on which we are DR: it carries the prefixes advertised in
 * Link LSAs by fully adjacent neighbors on the link, plus our own
 * prefixes configured for the link.  'old' is the copy currently in
 * the LSA database (or NULL).  Returns a freshly malloc'd LSA that the
 * caller owns, or NULL when there is nothing to originate and no old
 * copy needs to be flushed.
 */
struct lsa *
orig_intra_lsa_net(struct iface *iface, struct vertex *old)
{
	struct lsa		*lsa;
	struct vertex		*v;
	struct area		*area;
	struct prefix_node	*node;
	struct prefix_tree	 tree;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	if ((area = area_find(rdeconf, iface->area_id)) == NULL)
		fatalx("interface lost area");

	log_debug("orig_intra_lsa_net: area %s, interface %s",
	    inet_ntoa(area->id), iface->name);

	/* The tree deduplicates prefixes collected from multiple LSAs. */
	RB_INIT(&tree);

	if (iface->state & IF_STA_DR) {
		/* Collect prefixes from Link LSAs of fully adjacent nbrs. */
		RB_FOREACH(v, lsa_tree, &iface->lsa_tree) {
			if (v->type != LSA_TYPE_LINK)
				continue;
			if (link_lsa_from_full_nbr(v->lsa, iface))
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
		if (RB_EMPTY(&tree)) {
			/* There are no adjacent neighbors on link.
			 * If a copy of this LSA already exists in DB,
			 * it needs to be flushed. orig_intra_lsa_rtr()
			 * will take care of prefixes configured on
			 * this interface. */
			if (!old)
				return NULL;
		} else {
			/* Add our own prefixes configured for this link. */
			v = lsa_find(iface, htons(LSA_TYPE_LINK),
			    htonl(iface->ifindex), rde_router_id());
			if (v)
				prefix_tree_add(&tree, &v->lsa->data.link);
		}
	/* Continue only if a copy of this LSA already exists in DB.
	 * It needs to be flushed. */
	} else if (!old)
		return NULL;

	/* Start with header + fixed part; prefixes are appended below. */
	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_net");

	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
	lsa->data.pref_intra.ref_lsid = htonl(iface->ifindex);
	lsa->data.pref_intra.ref_adv_rtr = rdeconf->rtr_id.s_addr;

	numprefix = 0;
	RB_FOREACH(node, prefix_tree, &tree) {
		/* append_prefix_lsa() may realloc and update lsa and len. */
		append_prefix_lsa(&lsa, &len, node->prefix);
		numprefix++;
	}

	lsa->data.pref_intra.numprefix = htons(numprefix);

	/* Tree nodes own only the node structs, not the prefix data. */
	while (!RB_EMPTY(&tree))
		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));

	/* LSA header */
	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(iface->ifindex);
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}
1251
1252/* Prefix LSAs have variable size. We have to be careful to copy the right
1253 * amount of bytes, and to realloc() the right amount of memory. */
1254void
1255append_prefix_lsa(struct lsa **lsa, u_int16_t *len, struct lsa_prefix *prefix)
1256{
1257	struct lsa_prefix	*copy;
1258	unsigned int		 lsa_prefix_len;
1259	unsigned int		 new_len;
1260	char  			*new_lsa;
1261
1262	lsa_prefix_len = sizeof(struct lsa_prefix)
1263	    + LSA_PREFIXSIZE(prefix->prefixlen);
1264
1265	new_len = *len + lsa_prefix_len;
1266
1267	/* Make sure we have enough space for this prefix. */
1268	if ((new_lsa = realloc(*lsa, new_len)) == NULL)
1269		fatalx("append_prefix_lsa");
1270
1271	/* Append prefix to LSA. */
1272	copy = (struct lsa_prefix *)(new_lsa + *len);
1273	memcpy(copy, prefix, lsa_prefix_len);
1274	copy->metric = 0;
1275
1276	*lsa = (struct lsa *)new_lsa;
1277	*len = new_len;
1278}
1279
1280int
1281prefix_compare(struct prefix_node *a, struct prefix_node *b)
1282{
1283	struct lsa_prefix	*p;
1284	struct lsa_prefix	*q;
1285	int		 	 i;
1286	int			 len;
1287
1288	p = a->prefix;
1289	q = b->prefix;
1290
1291	len = MIN(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));
1292
1293	i = memcmp(p + 1, q + 1, len);
1294	if (i)
1295		return (i);
1296	if (p->prefixlen < q->prefixlen)
1297		return (-1);
1298	if (p->prefixlen > q->prefixlen)
1299		return (1);
1300	return (0);
1301}
1302
1303void
1304prefix_tree_add(struct prefix_tree *tree, struct lsa_link *lsa)
1305{
1306	struct prefix_node	*old;
1307	struct prefix_node	*new;
1308	struct in6_addr		 addr;
1309	unsigned int		 len;
1310	unsigned int		 i;
1311	char			*cur_prefix;
1312
1313	cur_prefix = (char *)(lsa + 1);
1314
1315	for (i = 0; i < ntohl(lsa->numprefix); i++) {
1316		new = calloc(sizeof(*new), 1);
1317		new->prefix = (struct lsa_prefix *)cur_prefix;
1318
1319		len = sizeof(*new->prefix)
1320		    + LSA_PREFIXSIZE(new->prefix->prefixlen);
1321
1322		bzero(&addr, sizeof(addr));
1323		memcpy(&addr, new->prefix + 1,
1324		    LSA_PREFIXSIZE(new->prefix->prefixlen));
1325
1326		if (!(IN6_IS_ADDR_LINKLOCAL(&addr)) &&
1327		    (new->prefix->options & OSPF_PREFIX_NU) == 0 &&
1328		    (new->prefix->options & OSPF_PREFIX_LA) == 0) {
1329			old = RB_INSERT(prefix_tree, tree, new);
1330			if (old != NULL) {
1331				old->prefix->options |= new->prefix->options;
1332				free(new);
1333			}
1334		}
1335
1336		cur_prefix = cur_prefix + len;
1337	}
1338}
1339
/* Instantiate the red-black tree functions for prefix_tree, keyed by
 * prefix_compare() and linked through prefix_node's 'entry' field. */
RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)
1341
1342/* Return non-zero if Link LSA was originated from an adjacent neighbor. */
1343int
1344link_lsa_from_full_nbr(struct lsa *lsa, struct iface *iface)
1345{
1346	struct rde_nbr	*nbr;
1347	struct area	*area;
1348
1349	if ((area = area_find(rdeconf, iface->area_id)) == NULL)
1350		fatalx("interface lost area");
1351
1352	LIST_FOREACH(nbr, &area->nbr_list, entry) {
1353		if (nbr->self || nbr->iface->ifindex != iface->ifindex)
1354			continue;
1355		if (lsa->hdr.adv_rtr == nbr->id.s_addr)
1356			break;
1357	}
1358	if (!nbr)
1359		return 0;
1360
1361	if (nbr->state & NBR_STA_FULL &&
1362	    ntohl(lsa->hdr.ls_id) == nbr->iface_id)
1363		return 1;
1364
1365	return 0;
1366}
1367
1368struct lsa *
1369orig_intra_lsa_rtr(struct area *area, struct vertex *old)
1370{
1371	struct lsa		*lsa;
1372	struct lsa_prefix	*lsa_prefix;
1373	struct in6_addr		*prefix;
1374	struct iface		*iface;
1375	struct iface_addr	*ia;
1376	struct rde_nbr		*nbr;
1377	u_int16_t		 len;
1378	u_int16_t		 numprefix;
1379
1380	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
1381	if ((lsa = calloc(1, len)) == NULL)
1382		fatal("orig_intra_lsa_net");
1383
1384	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_ROUTER);
1385	lsa->data.pref_intra.ref_lsid = 0;
1386	lsa->data.pref_intra.ref_adv_rtr = rde_router_id();
1387
1388	log_debug("orig_intra_lsa_rtr: area %s", inet_ntoa(area->id));
1389
1390	numprefix = 0;
1391	LIST_FOREACH(iface, &area->iface_list, entry) {
1392		if (iface->state & IF_STA_DOWN)
1393			continue;
1394
1395		/* Broadcast links with adjacencies are handled
1396		 * by orig_intra_lsa_net(), ignore. */
1397		if (iface->type == IF_TYPE_BROADCAST ||
1398		    iface->type == IF_TYPE_NBMA) {
1399			if (iface->state & IF_STA_WAITING)
1400				/* Skip, we're still waiting for
1401				 * adjacencies to form. */
1402				continue;
1403
1404			LIST_FOREACH(nbr, &area->nbr_list, entry)
1405				if (!nbr->self &&
1406				    nbr->iface->ifindex == iface->ifindex &&
1407				    nbr->state & NBR_STA_FULL)
1408					break;
1409			if (nbr)
1410				continue;
1411		}
1412
1413		if ((lsa_prefix = calloc(sizeof(*lsa_prefix)
1414		    + sizeof(struct in6_addr), 1)) == NULL)
1415			fatal("orig_intra_lsa_rtr");
1416
1417		TAILQ_FOREACH(ia, &iface->ifa_list, entry) {
1418			if (IN6_IS_ADDR_LINKLOCAL(&ia->addr))
1419				continue;
1420
1421			bzero(lsa_prefix, sizeof(*lsa_prefix)
1422			    + sizeof(struct in6_addr));
1423
1424			if (iface->type == IF_TYPE_POINTOMULTIPOINT ||
1425			    iface->state & IF_STA_LOOPBACK) {
1426				lsa_prefix->prefixlen = 128;
1427			} else {
1428				lsa_prefix->prefixlen = ia->prefixlen;
1429				lsa_prefix->metric = htons(iface->metric);
1430			}
1431
1432			if (lsa_prefix->prefixlen == 128)
1433				lsa_prefix->options |= OSPF_PREFIX_LA;
1434
1435			prefix = (struct in6_addr *)(lsa_prefix + 1);
1436			inet6applymask(prefix, &ia->addr,
1437			    lsa_prefix->prefixlen);
1438			append_prefix_lsa(&lsa, &len, lsa_prefix);
1439			numprefix++;
1440		}
1441
1442		free(lsa_prefix);
1443
1444		/* TOD: Add prefixes of directly attached hosts, too */
1445		/* TOD: Add prefixes for virtual links */
1446	}
1447
1448	/* If no prefixes were included, continue only if a copy of this
1449	 * LSA already exists in DB. It needs to be flushed. */
1450	if (numprefix == 0 && !old) {
1451		free(lsa);
1452		return NULL;
1453	}
1454
1455	lsa->data.pref_intra.numprefix = htons(numprefix);
1456
1457	/* LSA header */
1458	/* If numprefix is zero, originate with MAX_AGE to flush LSA. */
1459	lsa->hdr.age = numprefix == 0 ? htons(MAX_AGE) : htons(DEFAULT_AGE);
1460	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
1461	lsa->hdr.ls_id = htonl(LS_ID_INTRA_RTR);
1462	lsa->hdr.adv_rtr = rde_router_id();
1463	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1464	lsa->hdr.len = htons(len);
1465	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1466
1467	return lsa;
1468}
1469
1470void
1471orig_intra_area_prefix_lsas(struct area *area)
1472{
1473	struct lsa	*lsa;
1474	struct vertex	*old;
1475	struct iface	*iface;
1476	struct vertex	 key;
1477
1478	LIST_FOREACH(iface, &area->iface_list, entry) {
1479		if (iface->type == IF_TYPE_BROADCAST ||
1480		    iface->type == IF_TYPE_NBMA) {
1481			old = lsa_find(iface, htons(LSA_TYPE_INTRA_A_PREFIX),
1482			    htonl(iface->ifindex), rde_router_id());
1483			lsa = orig_intra_lsa_net(iface, old);
1484			if (lsa)
1485				lsa_merge(rde_nbr_self(area), lsa, old);
1486		}
1487	}
1488
1489	/* XXX: lsa_find() should take an LSA tree as argument,
1490	 * if you have no iface at hand you cannot use it... */
1491	bzero(&key, sizeof(key));
1492	key.type = LSA_TYPE_INTRA_A_PREFIX;
1493	key.ls_id = LS_ID_INTRA_RTR;
1494	key.adv_rtr = ntohl(rde_router_id());
1495	old = RB_FIND(lsa_tree, &area->lsa_tree, &key);
1496	if (old && old->deleted)
1497		old = NULL;
1498
1499	lsa = orig_intra_lsa_rtr(area, old);
1500	if (lsa)
1501		lsa_merge(rde_nbr_self(area), lsa, old);
1502}
1503
/*
 * Originate an AS-external LSA for a redistributed kernel route.
 * XXX not yet implemented for OSPFv3: the entire body is compiled out
 * and the function always returns NULL.  The disabled code below is
 * kept as an OSPFv2-style template (note it still uses IPv4-style
 * ls_id/mask fields and would need reworking for v3).
 */
struct lsa *
orig_asext_lsa(struct rroute *rr, u_int16_t age)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we announce the true nexthop.
	 * XXX this is wrong as the true nexthop may be outside
	 * of the ospf cloud and so unreachable. For now we force
	 * all traffic to be directed to us.
	 */
	lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(rr->metric);
	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	return NULL;
}
1554
/*
 * Originate an inter-area summary LSA for a route into 'area'.
 * XXX not yet implemented for OSPFv3: the entire body is compiled out
 * and the function always returns NULL.  The disabled code below is
 * kept as an OSPFv2-style template (sum-network/sum-router LSA layout
 * with IPv4 ls_id/mask fields); rde_summary_update() is the intended
 * caller.
 */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	return NULL;
}
1595