/*	$OpenBSD: rde.c,v 1.10 2007/12/13 08:54:05 claudio Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct lsa	*rde_asext_get(struct rroute *);
struct lsa	*rde_asext_put(struct rroute *);

struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
struct imsgbuf		*ibuf_ospfe;
struct imsgbuf		*ibuf_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

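/*
 * The RDE runs as its own unprivileged process: rde() forks, chroots
 * to the home directory of OSPF6D_USER, drops privileges and then
 * services imsgs from the parent process and the OSPF engine over the
 * inherited pipes from within the libevent loop.
 */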
/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);

	/* setup event handler */
	ibuf_ospfe->events = EV_READ;
	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
	    ibuf_ospfe->handler, ibuf_ospfe);
	event_add(&ibuf_ospfe->ev, NULL);

	ibuf_main->events = EV_READ;
	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
	    ibuf_main->handler, ibuf_main);
	event_add(&ibuf_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	msgbuf_clear(&ibuf_ospfe->w);
	free(ibuf_ospfe);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
}

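/*
 * Handle imsgs arriving from the OSPF engine (ospfe): neighbor state
 * transitions, database exchange (DD, LS requests/updates/max-age) and
 * the various ctl show requests.  EV_WRITE only flushes the msgbuf;
 * EV_READ drains the pipe and then loops over imsg_get() until it
 * returns 0.
 */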
/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgbuf		*ibuf = bula;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, shut = 0;
	u_int16_t		 l;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_find(imsg.hdr.peerid))
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			rde_nbr_new(imsg.hdr.peerid, &rn);
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
			    state & NBR_STA_FULL))
				area_track(nbr->area, state);

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr->area, imsg.hdr.peerid);

			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
			    0, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose(ibuf_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			imsg_compose(ibuf_ospfe, IMSG_DD_END, imsg.hdr.peerid,
			    0, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
					    imsg.hdr.peerid, 0, NULL, 0);
					continue;
				}
				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
				    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

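			/*
			 * lsa_newer() compares the received header with the
			 * database copy: > 0 means the received LSA is newer
			 * (install and flood it), < 0 means our copy is newer
			 * (send it back), == 0 means both are equal (just
			 * acknowledge).
			 */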
			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
					    v->peerid, 0, v->lsa,
					    ntohs(v->lsa->hdr.len));
				/* lsa not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/* lsa no longer needed */
				free(lsa);

				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table we should reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
					    imsg.hdr.peerid, 0, NULL, 0);
					break;
				}

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose(ibuf_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
				    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}

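/*
 * Handle imsgs arriving from the parent process: redistributed kernel
 * routes (IMSG_NETWORK_ADD/DEL), kroute lookups, interface add/delete
 * and configuration reload (IMSG_RECONF_*).
 */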
/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface, *iface;
	struct imsg		 imsg;
	struct kroute		 kr;
	struct rroute		 rr;
	struct imsgbuf		*ibuf = bula;
	struct lsa		*lsa;
	struct vertex		*v;
	struct rt_node		*rn;
	ssize_t			 n;
	int			 shut = 0;
	unsigned int		 ifindex;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			if ((lsa = rde_asext_get(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			if ((lsa = rde_asext_put(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				/*
				 * if v == NULL no LSA is in the table and
				 * nothing has to be done.
				 */
				if (v)
					lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_KROUTE_GET:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
			    DT_NET)) != NULL)
				rde_send_change_kroute(rn);
			else
				/* should not happen */
				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
				    0, &kr, sizeof(kr));
			break;
		case IMSG_IFADD:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			RB_INIT(&niface->lsa_tree);

			narea = area_find(rdeconf, niface->area_id);
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
			break;
		case IMSG_IFDELETE:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(ifindex))
				fatalx("IFDELETE imsg with wrong len");

			memcpy(&ifindex, imsg.data, sizeof(ifindex));
			iface = if_find(ifindex);
			if (iface == NULL)
				fatalx("interface lost in ospfe");

			LIST_REMOVE(iface, entry);
			if_del(iface);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}

void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose(ibuf_ospfe, IMSG_CTL_AREA, 0, pid, area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose(ibuf_ospfe, IMSG_CTL_IFACE,
		    0, pid, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

void
rde_send_change_kroute(struct rt_node *r)
{
	struct kroute		 kr;
	struct rt_nexthop	*rn;

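	/* use the first nexthop that is not marked invalid */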
	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (!rn->invalid)
			break;
	}
	if (!rn)
		fatalx("rde_send_change_kroute: no valid nexthop found");

	bzero(&kr, sizeof(kr));
	kr.prefix = r->prefix;
	kr.nexthop = rn->nexthop;
	kr.prefixlen = r->prefixlen;
	kr.ext_tag = r->ext_tag;

	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
}

void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix = r->prefix;
	kr.prefixlen = r->prefixlen;

	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
}

void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree)
		sumctl.num_ext_lsa++;

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree)
		sumareactl.num_lsa++;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

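/*
 * RDE neighbor table: neighbors are hashed on the peerid assigned by
 * ospfe.  rde_nbr_init() rounds the table size up to a power of two so
 * RDE_NBR_HASH() can mask with hashmask instead of using a modulo.
 */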
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr		*nbr;
	int			 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr		*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this may not happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
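/*
 * Note: AS-external origination is still mostly stubbed out in this
 * revision; orig_asext_lsa() below is disabled (#if 0) and returns
 * NULL, so rde_asext_get()/rde_asext_put() currently originate nothing.
 */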
struct lsa *
rde_asext_get(struct rroute *rr)
{
#if 0
	struct area	*area;
	struct iface	*iface;
XXX
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
			    prefixlen2mask(rr->kr.prefixlen)) {
				/* already announced as (stub) net LSA */
				log_debug("rde_asext_get: %s/%d is net LSA",
				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
				return (NULL);
			}
		}
#endif
	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}

struct lsa *
rde_asext_put(struct rroute *rr)
{
	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA lsa_find() will fail later and nothing will happen.
	 */

	/* remove by reflooding with MAX_AGE */
	return (orig_asext_lsa(rr, MAX_AGE));
}

/*
 * summary LSA stuff
 */
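/*
 * Note: only the selection of the LSA type is implemented so far; the
 * actual origination and merge of inter-area-prefix/-router LSAs is
 * still disabled (#if 0) below.
 */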
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct vertex		*v = NULL;
//XXX	struct lsa		*lsa;
	u_int16_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* TODO nexthop check, nexthop part of area -> no summary */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_INTER_A_PREFIX;
	} else if (rte->d_type == DT_RTR) {
		type = LSA_TYPE_INTER_A_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

#if 0 /* XXX a lot todo */
	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
#endif

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}

/*
 * functions for self-originated LSA
 */
struct lsa *
orig_asext_lsa(struct rroute *rr, u_int16_t age)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we announce the true nexthop.
	 * XXX this is wrong as the true nexthop may be outside
	 * of the ospf cloud and so unreachable. For now we force
	 * all traffic to be directed to us.
	 */
	lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(rr->metric);
	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	return (NULL);
}

struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
#if 0 /* XXX a lot todo */
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
#endif
	return (NULL);
}