/*	$OpenBSD: rde.c,v 1.4 2007/10/16 08:41:56 claudio Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

41void		 rde_sig_handler(int sig, short, void *);
42void		 rde_shutdown(void);
43void		 rde_dispatch_imsg(int, short, void *);
44void		 rde_dispatch_parent(int, short, void *);
45
46void		 rde_send_summary(pid_t);
47void		 rde_send_summary_area(struct area *, pid_t);
48void		 rde_nbr_init(u_int32_t);
49void		 rde_nbr_free(void);
50struct rde_nbr	*rde_nbr_find(u_int32_t);
51struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
52void		 rde_nbr_del(struct rde_nbr *);
53
54void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
55int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
56void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
57void		 rde_req_list_free(struct rde_nbr *);
58
59struct lsa	*rde_asext_get(struct rroute *);
60struct lsa	*rde_asext_put(struct rroute *);
61
62struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
63struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
64
65struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
66struct imsgbuf		*ibuf_ospfe;
67struct imsgbuf		*ibuf_main;
68struct rde_nbr		*nbrself;
69struct lsa_tree		 asext_tree;
70
71/* ARGSUSED */
72void
73rde_sig_handler(int sig, short event, void *arg)
74{
75	/*
76	 * signal handler rules don't apply, libevent decouples for us
77	 */
78
79	switch (sig) {
80	case SIGINT:
81	case SIGTERM:
82		rde_shutdown();
83		/* NOTREACHED */
84	default:
85		fatalx("unexpected signal");
86	}
87}
88
89/* route decision engine */
90pid_t
91rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
92    int pipe_parent2ospfe[2])
93{
94	struct event		 ev_sigint, ev_sigterm;
95	struct timeval		 now;
96	struct passwd		*pw;
97	struct redistribute	*r;
98	pid_t			 pid;
99
100	switch (pid = fork()) {
101	case -1:
102		fatal("cannot fork");
103		/* NOTREACHED */
104	case 0:
105		break;
106	default:
107		return (pid);
108	}
109
110	rdeconf = xconf;
111
112	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
113		fatal("getpwnam");
114
115	if (chroot(pw->pw_dir) == -1)
116		fatal("chroot");
117	if (chdir("/") == -1)
118		fatal("chdir(\"/\")");
119
120	setproctitle("route decision engine");
121	ospfd_process = PROC_RDE_ENGINE;
122
123	if (setgroups(1, &pw->pw_gid) ||
124	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
125	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
126		fatal("can't drop privileges");
127
128	event_init();
129	rde_nbr_init(NBR_HASHSIZE);
130	lsa_init(&asext_tree);
131
132	/* setup signal handler */
133	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
134	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
135	signal_add(&ev_sigint, NULL);
136	signal_add(&ev_sigterm, NULL);
137	signal(SIGPIPE, SIG_IGN);
138	signal(SIGHUP, SIG_IGN);
139
140	/* setup pipes */
141	close(pipe_ospfe2rde[0]);
142	close(pipe_parent2rde[0]);
143	close(pipe_parent2ospfe[0]);
144	close(pipe_parent2ospfe[1]);
145
146	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
147	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
148		fatal(NULL);
149	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
150	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);
151
152	/* setup event handler */
153	ibuf_ospfe->events = EV_READ;
154	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
155	    ibuf_ospfe->handler, ibuf_ospfe);
156	event_add(&ibuf_ospfe->ev, NULL);
157
158	ibuf_main->events = EV_READ;
159	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
160	    ibuf_main->handler, ibuf_main);
161	event_add(&ibuf_main->ev, NULL);
162
163	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
164	cand_list_init();
165	rt_init();
166
167	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
168		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
169		free(r);
170	}
171
172	gettimeofday(&now, NULL);
173	rdeconf->uptime = now.tv_sec;
174
175	event_dispatch();
176
177	rde_shutdown();
178	/* NOTREACHED */
179
180	return (0);
181}
182
183void
184rde_shutdown(void)
185{
186	struct area	*a;
187
188	stop_spf_timer(rdeconf);
189	cand_list_clr();
190	rt_clear();
191
192	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
193		LIST_REMOVE(a, entry);
194		area_del(a);
195	}
196	rde_nbr_free();
197
198	msgbuf_clear(&ibuf_ospfe->w);
199	free(ibuf_ospfe);
200	msgbuf_clear(&ibuf_main->w);
201	free(ibuf_main);
202	free(rdeconf);
203
204	log_info("route decision engine exiting");
205	_exit(0);
206}
207
208int
209rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
210    u_int16_t datalen)
211{
212	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
213}
214
215/* ARGSUSED */
216void
217rde_dispatch_imsg(int fd, short event, void *bula)
218{
219	struct imsgbuf		*ibuf = bula;
220	struct imsg		 imsg;
221	struct in_addr		 aid;
222	struct ls_req_hdr	 req_hdr;
223	struct lsa_hdr		 lsa_hdr, *db_hdr;
224	struct rde_nbr		 rn, *nbr;
225	struct timespec		 tp;
226	struct lsa		*lsa;
227	struct area		*area;
228	struct vertex		*v;
229	char			*buf;
230	ssize_t			 n;
231	time_t			 now;
232	int			 r, state, self, shut = 0;
233	u_int16_t		 l;
234
235	switch (event) {
236	case EV_READ:
237		if ((n = imsg_read(ibuf)) == -1)
238			fatal("imsg_read error");
239		if (n == 0)	/* connection closed */
240			shut = 1;
241		break;
242	case EV_WRITE:
243		if (msgbuf_write(&ibuf->w) == -1)
244			fatal("msgbuf_write");
245		imsg_event_add(ibuf);
246		return;
247	default:
248		fatalx("unknown event");
249	}
250
251	clock_gettime(CLOCK_MONOTONIC, &tp);
252	now = tp.tv_sec;
253
254	for (;;) {
255		if ((n = imsg_get(ibuf, &imsg)) == -1)
256			fatal("rde_dispatch_imsg: imsg_read error");
257		if (n == 0)
258			break;
259
260		switch (imsg.hdr.type) {
261		case IMSG_NEIGHBOR_UP:
262			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
263				fatalx("invalid size of OE request");
264			memcpy(&rn, imsg.data, sizeof(rn));
265
266			if (rde_nbr_find(imsg.hdr.peerid))
267				fatalx("rde_dispatch_imsg: "
268				    "neighbor already exists");
269			rde_nbr_new(imsg.hdr.peerid, &rn);
270			break;
271		case IMSG_NEIGHBOR_DOWN:
272			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
273			break;
274		case IMSG_NEIGHBOR_CHANGE:
275			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
276				fatalx("invalid size of OE request");
277			memcpy(&state, imsg.data, sizeof(state));
278
279			nbr = rde_nbr_find(imsg.hdr.peerid);
280			if (nbr == NULL)
281				break;
282
283			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
284			    state & NBR_STA_FULL))
285				area_track(nbr->area, state);
286
287			nbr->state = state;
288			if (nbr->state & NBR_STA_FULL)
289				rde_req_list_free(nbr);
290			break;
291		case IMSG_DB_SNAPSHOT:
292			nbr = rde_nbr_find(imsg.hdr.peerid);
293			if (nbr == NULL)
294				break;
295
296//XXX needs work
297//XXX			lsa_snap(nbr->area, imsg.hdr.peerid);
298
299			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
300			    0, NULL, 0);
301			break;
302		case IMSG_DD:
303			nbr = rde_nbr_find(imsg.hdr.peerid);
304			if (nbr == NULL)
305				break;
306
307			buf = imsg.data;
308#if 0	/* XXX does not work yet */
309			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
310			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
311				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
312				buf += sizeof(lsa_hdr);
313
314				v = lsa_find(nbr->area, lsa_hdr.type,
315				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
316				if (v == NULL)
317					db_hdr = NULL;
318				else
319					db_hdr = &v->lsa->hdr;
320
321				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
322					/*
323					 * only request LSAs that are
324					 * newer or missing
325					 */
326					rde_req_list_add(nbr, &lsa_hdr);
327					imsg_compose(ibuf_ospfe, IMSG_DD,
328					    imsg.hdr.peerid, 0, &lsa_hdr,
329					    sizeof(lsa_hdr));
330				}
331			}
332			if (l != 0)
333				log_warnx("rde_dispatch_imsg: peerid %lu, "
334				    "trailing garbage in Database Description "
335				    "packet", imsg.hdr.peerid);
336#endif
337
338			imsg_compose(ibuf_ospfe, IMSG_DD_END, imsg.hdr.peerid,
339			    0, NULL, 0);
340			break;
341		case IMSG_LS_REQ:
342			nbr = rde_nbr_find(imsg.hdr.peerid);
343			if (nbr == NULL)
344				break;
345
346			buf = imsg.data;
347			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
348			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
349				memcpy(&req_hdr, buf, sizeof(req_hdr));
350				buf += sizeof(req_hdr);
351
352				if ((v = lsa_find(nbr->area,
353				    ntohl(req_hdr.type), req_hdr.ls_id,
354				    req_hdr.adv_rtr)) == NULL) {
355					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
356					    imsg.hdr.peerid, 0, NULL, 0);
357					continue;
358				}
359				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
360				    imsg.hdr.peerid, 0, v->lsa,
361				    ntohs(v->lsa->hdr.len));
362			}
363			if (l != 0)
364				log_warnx("rde_dispatch_imsg: peerid %lu, "
365				    "trailing garbage in LS Request "
366				    "packet", imsg.hdr.peerid);
367			break;
368		case IMSG_LS_UPD:
369			nbr = rde_nbr_find(imsg.hdr.peerid);
370			if (nbr == NULL)
371				break;
372
373			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
374			if (lsa == NULL)
375				fatal(NULL);
376			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
377
378			if (!lsa_check(nbr, lsa,
379			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
380				free(lsa);
381				break;
382			}
383
384			v = lsa_find(nbr->area, lsa->hdr.type, lsa->hdr.ls_id,
385				    lsa->hdr.adv_rtr);
386			if (v == NULL)
387				db_hdr = NULL;
388			else
389				db_hdr = &v->lsa->hdr;
390
391			if (nbr->self) {
392				lsa_merge(nbr, lsa, v);
393				/* lsa_merge frees the right lsa */
394				break;
395			}
396
397			r = lsa_newer(&lsa->hdr, db_hdr);
398			if (r > 0) {
399				/* new LSA newer than DB */
400				if (v && v->flooded &&
401				    v->changed + MIN_LS_ARRIVAL >= now) {
402					free(lsa);
403					break;
404				}
405
406				rde_req_list_del(nbr, &lsa->hdr);
407
408				if (!(self = lsa_self(nbr, lsa, v)))
409					if (lsa_add(nbr, lsa))
410						/* delayed lsa */
411						break;
412
413				/* flood and perhaps ack LSA */
414				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
415				    imsg.hdr.peerid, 0, lsa,
416				    ntohs(lsa->hdr.len));
417
418				/* reflood self originated LSA */
419				if (self && v)
420					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
421					    v->peerid, 0, v->lsa,
422					    ntohs(v->lsa->hdr.len));
423				/* lsa not added so free it */
424				if (self)
425					free(lsa);
426			} else if (r < 0) {
427				/* lsa no longer needed */
428				free(lsa);
429
430				/*
431				 * point 6 of "The Flooding Procedure"
432				 * We are violating the RFC here because
433				 * it does not make sense to reset a session
434				 * because an equal LSA is already in the table.
435				 * Only if the LSA sent is older than the one
436				 * in the table we should reset the session.
437				 */
438				if (rde_req_list_exists(nbr, &lsa->hdr)) {
439					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
440					    imsg.hdr.peerid, 0, NULL, 0);
441					break;
442				}
443
444				/* new LSA older than DB */
445				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
446				    ntohs(db_hdr->age) == MAX_AGE)
447					/* seq-num wrap */
448					break;
449
450				if (v->changed + MIN_LS_ARRIVAL >= now)
451					break;
452
453				/* directly send current LSA, no ack */
454				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
455				    imsg.hdr.peerid, 0, v->lsa,
456				    ntohs(v->lsa->hdr.len));
457			} else {
458				/* LSA equal send direct ack */
459				imsg_compose(ibuf_ospfe, IMSG_LS_ACK,
460				    imsg.hdr.peerid, 0, &lsa->hdr,
461				    sizeof(lsa->hdr));
462				free(lsa);
463			}
464			break;
465		case IMSG_LS_MAXAGE:
466			nbr = rde_nbr_find(imsg.hdr.peerid);
467			if (nbr == NULL)
468				break;
469
470			if (imsg.hdr.len != IMSG_HEADER_SIZE +
471			    sizeof(struct lsa_hdr))
472				fatalx("invalid size of OE request");
473			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
474
475			if (rde_nbr_loading(nbr->area))
476				break;
477
478			v = lsa_find(nbr->area, lsa_hdr.type, lsa_hdr.ls_id,
479				    lsa_hdr.adv_rtr);
480			if (v == NULL)
481				db_hdr = NULL;
482			else
483				db_hdr = &v->lsa->hdr;
484
485			/*
486			 * only delete LSA if the one in the db is not newer
487			 */
488			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
489				lsa_del(nbr, &lsa_hdr);
490			break;
491		case IMSG_CTL_SHOW_DATABASE:
492		case IMSG_CTL_SHOW_DB_EXT:
493		case IMSG_CTL_SHOW_DB_NET:
494		case IMSG_CTL_SHOW_DB_RTR:
495		case IMSG_CTL_SHOW_DB_SELF:
496		case IMSG_CTL_SHOW_DB_SUM:
497		case IMSG_CTL_SHOW_DB_ASBR:
498			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
499			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
500				log_warnx("rde_dispatch: wrong imsg len");
501				break;
502			}
503			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
504				LIST_FOREACH(area, &rdeconf->area_list, entry) {
505					imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
506					    0, imsg.hdr.pid, area,
507					    sizeof(*area));
508					lsa_dump(&area->lsa_tree, imsg.hdr.type,
509					    imsg.hdr.pid);
510				}
511				lsa_dump(&asext_tree, imsg.hdr.type,
512				    imsg.hdr.pid);
513			} else {
514				memcpy(&aid, imsg.data, sizeof(aid));
515				if ((area = area_find(rdeconf, aid)) != NULL) {
516					imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
517					    0, imsg.hdr.pid, area,
518					    sizeof(*area));
519					lsa_dump(&area->lsa_tree, imsg.hdr.type,
520					    imsg.hdr.pid);
521					if (!area->stub)
522						lsa_dump(&asext_tree,
523						    imsg.hdr.type,
524						    imsg.hdr.pid);
525				}
526			}
527			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
528			    NULL, 0);
529			break;
530		case IMSG_CTL_SHOW_RIB:
531			LIST_FOREACH(area, &rdeconf->area_list, entry) {
532				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
533				    0, imsg.hdr.pid, area, sizeof(*area));
534
535				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
536				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
537			}
538			aid.s_addr = 0;
539			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
540
541			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
542			    NULL, 0);
543			break;
544		case IMSG_CTL_SHOW_SUM:
545			rde_send_summary(imsg.hdr.pid);
546			LIST_FOREACH(area, &rdeconf->area_list, entry)
547				rde_send_summary_area(area, imsg.hdr.pid);
548			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
549			    NULL, 0);
550			break;
551		default:
552			log_debug("rde_dispatch_msg: unexpected imsg %d",
553			    imsg.hdr.type);
554			break;
555		}
556		imsg_free(&imsg);
557	}
558	if (!shut)
559		imsg_event_add(ibuf);
560	else {
561		/* this pipe is dead, so remove the event handler */
562		event_del(&ibuf->ev);
563		event_loopexit(NULL);
564	}
565}
566
567/* ARGSUSED */
568void
569rde_dispatch_parent(int fd, short event, void *bula)
570{
571	static struct area	*narea;
572	struct iface		*niface;
573	struct imsg		 imsg;
574	struct kroute		 kr;
575	struct rroute		 rr;
576	struct imsgbuf		*ibuf = bula;
577	struct lsa		*lsa;
578	struct vertex		*v;
579	struct rt_node		*rn;
580	ssize_t			 n;
581	int			 shut = 0;
582
583	switch (event) {
584	case EV_READ:
585		if ((n = imsg_read(ibuf)) == -1)
586			fatal("imsg_read error");
587		if (n == 0)	/* connection closed */
588			shut = 1;
589		break;
590	case EV_WRITE:
591		if (msgbuf_write(&ibuf->w) == -1)
592			fatal("msgbuf_write");
593		imsg_event_add(ibuf);
594		return;
595	default:
596		fatalx("unknown event");
597	}
598
599	for (;;) {
600		if ((n = imsg_get(ibuf, &imsg)) == -1)
601			fatal("rde_dispatch_parent: imsg_read error");
602		if (n == 0)
603			break;
604
605		switch (imsg.hdr.type) {
606		case IMSG_NETWORK_ADD:
607			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
608				log_warnx("rde_dispatch: wrong imsg len");
609				break;
610			}
611			memcpy(&rr, imsg.data, sizeof(rr));
612
613			if ((lsa = rde_asext_get(&rr)) != NULL) {
614				v = lsa_find(NULL, lsa->hdr.type,
615				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
616
617				lsa_merge(nbrself, lsa, v);
618			}
619			break;
620		case IMSG_NETWORK_DEL:
621			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
622				log_warnx("rde_dispatch: wrong imsg len");
623				break;
624			}
625			memcpy(&rr, imsg.data, sizeof(rr));
626
627			if ((lsa = rde_asext_put(&rr)) != NULL) {
628				v = lsa_find(NULL, lsa->hdr.type,
629				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
630
631				/*
632				 * if v == NULL no LSA is in the table and
633				 * nothing has to be done.
634				 */
635				if (v)
636					lsa_merge(nbrself, lsa, v);
637			}
638			break;
639		case IMSG_KROUTE_GET:
640			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
641				log_warnx("rde_dispatch: wrong imsg len");
642				break;
643			}
644			memcpy(&kr, imsg.data, sizeof(kr));
645
646			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
647			    DT_NET)) != NULL)
648				rde_send_change_kroute(rn);
649			else
650				/* should not happen */
651				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
652				    0, &kr, sizeof(kr));
653			break;
654		case IMSG_RECONF_CONF:
655			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
656			    NULL)
657				fatal(NULL);
658			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
659
660			LIST_INIT(&nconf->area_list);
661			LIST_INIT(&nconf->cand_list);
662			break;
663		case IMSG_RECONF_AREA:
664			if ((narea = area_new()) == NULL)
665				fatal(NULL);
666			memcpy(narea, imsg.data, sizeof(struct area));
667
668			LIST_INIT(&narea->iface_list);
669			LIST_INIT(&narea->nbr_list);
670			RB_INIT(&narea->lsa_tree);
671
672			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
673			break;
674		case IMSG_RECONF_IFACE:
675			if ((niface = malloc(sizeof(struct iface))) == NULL)
676				fatal(NULL);
677			memcpy(niface, imsg.data, sizeof(struct iface));
678
679			LIST_INIT(&niface->nbr_list);
680			TAILQ_INIT(&niface->ls_ack_list);
681
682			niface->area = narea;
683			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
684
685			break;
686		case IMSG_RECONF_END:
687			merge_config(rdeconf, nconf);
688			nconf = NULL;
689			break;
690		default:
691			log_debug("rde_dispatch_parent: unexpected imsg %d",
692			    imsg.hdr.type);
693			break;
694		}
695		imsg_free(&imsg);
696	}
697	if (!shut)
698		imsg_event_add(ibuf);
699	else {
700		/* this pipe is dead, so remove the event handler */
701		event_del(&ibuf->ev);
702		event_loopexit(NULL);
703	}
704}
705
706u_int32_t
707rde_router_id(void)
708{
709	return (rdeconf->rtr_id.s_addr);
710}
711
712void
713rde_send_change_kroute(struct rt_node *r)
714{
715	struct kroute		 kr;
716	struct rt_nexthop	*rn;
717
718	TAILQ_FOREACH(rn, &r->nexthop, entry) {
719		if (!rn->invalid)
720			break;
721	}
722	if (!rn)
723		fatalx("rde_send_change_kroute: no valid nexthop found");
724
725	bzero(&kr, sizeof(kr));
726	kr.prefix = r->prefix;
727	kr.nexthop = rn->nexthop;
728	kr.prefixlen = r->prefixlen;
729	kr.ext_tag = r->ext_tag;
730
731	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
732}
733
734void
735rde_send_delete_kroute(struct rt_node *r)
736{
737	struct kroute	 kr;
738
739	bzero(&kr, sizeof(kr));
740	kr.prefix = r->prefix;
741	kr.prefixlen = r->prefixlen;
742
743	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
744}
745
746void
747rde_send_summary(pid_t pid)
748{
749	static struct ctl_sum	 sumctl;
750	struct timeval		 now;
751	struct area		*area;
752	struct vertex		*v;
753
754	bzero(&sumctl, sizeof(struct ctl_sum));
755
756	sumctl.rtr_id.s_addr = rde_router_id();
757	sumctl.spf_delay = rdeconf->spf_delay;
758	sumctl.spf_hold_time = rdeconf->spf_hold_time;
759
760	LIST_FOREACH(area, &rdeconf->area_list, entry)
761		sumctl.num_area++;
762
763	RB_FOREACH(v, lsa_tree, &asext_tree)
764		sumctl.num_ext_lsa++;
765
766	gettimeofday(&now, NULL);
767	if (rdeconf->uptime < now.tv_sec)
768		sumctl.uptime = now.tv_sec - rdeconf->uptime;
769	else
770		sumctl.uptime = 0;
771
772	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
773	    sizeof(sumctl));
774}
775
776void
777rde_send_summary_area(struct area *area, pid_t pid)
778{
779	static struct ctl_sum_area	 sumareactl;
780	struct iface			*iface;
781	struct rde_nbr			*nbr;
782	struct lsa_tree			*tree = &area->lsa_tree;
783	struct vertex			*v;
784
785	bzero(&sumareactl, sizeof(struct ctl_sum_area));
786
787	sumareactl.area.s_addr = area->id.s_addr;
788	sumareactl.num_spf_calc = area->num_spf_calc;
789
790	LIST_FOREACH(iface, &area->iface_list, entry)
791		sumareactl.num_iface++;
792
793	LIST_FOREACH(nbr, &area->nbr_list, entry)
794		if (nbr->state == NBR_STA_FULL && !nbr->self)
795			sumareactl.num_adj_nbr++;
796
797	RB_FOREACH(v, lsa_tree, tree)
798		sumareactl.num_lsa++;
799
800	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
801	    sizeof(sumareactl));
802}
803
804LIST_HEAD(rde_nbr_head, rde_nbr);
805
806struct nbr_table {
807	struct rde_nbr_head	*hashtbl;
808	u_int32_t		 hashmask;
809} rdenbrtable;
810
811#define RDE_NBR_HASH(x)		\
812	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
813
814void
815rde_nbr_init(u_int32_t hashsize)
816{
817	struct rde_nbr_head	*head;
818	u_int32_t		 hs, i;
819
820	for (hs = 1; hs < hashsize; hs <<= 1)
821		;
822	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
823	if (rdenbrtable.hashtbl == NULL)
824		fatal("rde_nbr_init");
825
826	for (i = 0; i < hs; i++)
827		LIST_INIT(&rdenbrtable.hashtbl[i]);
828
829	rdenbrtable.hashmask = hs - 1;
830
831	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
832		fatal("rde_nbr_init");
833
834	nbrself->id.s_addr = rde_router_id();
835	nbrself->peerid = NBR_IDSELF;
836	nbrself->state = NBR_STA_DOWN;
837	nbrself->self = 1;
838	head = RDE_NBR_HASH(NBR_IDSELF);
839	LIST_INSERT_HEAD(head, nbrself, hash);
840}
841
842void
843rde_nbr_free(void)
844{
845	free(nbrself);
846	free(rdenbrtable.hashtbl);
847}
848
849struct rde_nbr *
850rde_nbr_find(u_int32_t peerid)
851{
852	struct rde_nbr_head	*head;
853	struct rde_nbr		*nbr;
854
855	head = RDE_NBR_HASH(peerid);
856
857	LIST_FOREACH(nbr, head, hash) {
858		if (nbr->peerid == peerid)
859			return (nbr);
860	}
861
862	return (NULL);
863}
864
865struct rde_nbr *
866rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
867{
868	struct rde_nbr_head	*head;
869	struct rde_nbr		*nbr;
870	struct area		*area;
871
872	if (rde_nbr_find(peerid))
873		return (NULL);
874	if ((area = area_find(rdeconf, new->area_id)) == NULL)
875		fatalx("rde_nbr_new: unknown area");
876
877	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
878		fatal("rde_nbr_new");
879
880	memcpy(nbr, new, sizeof(*nbr));
881	nbr->peerid = peerid;
882	nbr->area = area;
883
884	TAILQ_INIT(&nbr->req_list);
885
886	head = RDE_NBR_HASH(peerid);
887	LIST_INSERT_HEAD(head, nbr, hash);
888	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
889
890	return (nbr);
891}
892
893void
894rde_nbr_del(struct rde_nbr *nbr)
895{
896	if (nbr == NULL)
897		return;
898
899	rde_req_list_free(nbr);
900
901	LIST_REMOVE(nbr, entry);
902	LIST_REMOVE(nbr, hash);
903
904	free(nbr);
905}
906
907int
908rde_nbr_loading(struct area *area)
909{
910	struct rde_nbr		*nbr;
911	int			 checkall = 0;
912
913	if (area == NULL) {
914		area = LIST_FIRST(&rdeconf->area_list);
915		checkall = 1;
916	}
917
918	while (area != NULL) {
919		LIST_FOREACH(nbr, &area->nbr_list, entry) {
920			if (nbr->self)
921				continue;
922			if (nbr->state & NBR_STA_XCHNG ||
923			    nbr->state & NBR_STA_LOAD)
924				return (1);
925		}
926		if (!checkall)
927			break;
928		area = LIST_NEXT(area, entry);
929	}
930
931	return (0);
932}
933
934struct rde_nbr *
935rde_nbr_self(struct area *area)
936{
937	struct rde_nbr		*nbr;
938
939	LIST_FOREACH(nbr, &area->nbr_list, entry)
940		if (nbr->self)
941			return (nbr);
942
943	/* this may not happen */
944	fatalx("rde_nbr_self: area without self");
945	return (NULL);
946}
947
948/*
949 * LSA req list
950 */
951void
952rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
953{
954	struct rde_req_entry	*le;
955
956	if ((le = calloc(1, sizeof(*le))) == NULL)
957		fatal("rde_req_list_add");
958
959	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
960	le->type = lsa->type;
961	le->ls_id = lsa->ls_id;
962	le->adv_rtr = lsa->adv_rtr;
963}
964
965int
966rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
967{
968	struct rde_req_entry	*le;
969
970	TAILQ_FOREACH(le, &nbr->req_list, entry) {
971		if ((lsa_hdr->type == le->type) &&
972		    (lsa_hdr->ls_id == le->ls_id) &&
973		    (lsa_hdr->adv_rtr == le->adv_rtr))
974			return (1);
975	}
976	return (0);
977}
978
979void
980rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
981{
982	struct rde_req_entry	*le;
983
984	TAILQ_FOREACH(le, &nbr->req_list, entry) {
985		if ((lsa_hdr->type == le->type) &&
986		    (lsa_hdr->ls_id == le->ls_id) &&
987		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
988			TAILQ_REMOVE(&nbr->req_list, le, entry);
989			free(le);
990			return;
991		}
992	}
993}
994
995void
996rde_req_list_free(struct rde_nbr *nbr)
997{
998	struct rde_req_entry	*le;
999
1000	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1001		TAILQ_REMOVE(&nbr->req_list, le, entry);
1002		free(le);
1003	}
1004}
1005
1006/*
1007 * as-external LSA handling
1008 */
1009struct lsa *
1010rde_asext_get(struct rroute *rr)
1011{
1012#if 0
1013	struct area	*area;
1014	struct iface	*iface;
1015XXX
1016	LIST_FOREACH(area, &rdeconf->area_list, entry)
1017		LIST_FOREACH(iface, &area->iface_list, entry) {
1018			if ((iface->addr.s_addr & iface->mask.s_addr) ==
1019			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
1020			    prefixlen2mask(rr->kr.prefixlen)) {
1021				/* already announced as (stub) net LSA */
1022				log_debug("rde_asext_get: %s/%d is net LSA",
1023				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
1024				return (NULL);
1025			}
1026		}
1027#endif
1028	/* update of seqnum is done by lsa_merge */
1029	return (orig_asext_lsa(rr, DEFAULT_AGE));
1030}
1031
1032struct lsa *
1033rde_asext_put(struct rroute *rr)
1034{
1035	/*
1036	 * just try to remove the LSA. If the prefix is announced as
1037	 * stub net LSA lsa_find() will fail later and nothing will happen.
1038	 */
1039
1040	/* remove by reflooding with MAX_AGE */
1041	return (orig_asext_lsa(rr, MAX_AGE));
1042}
1043
1044/*
1045 * summary LSA stuff
1046 */
1047void
1048rde_summary_update(struct rt_node *rte, struct area *area)
1049{
1050	struct vertex		*v = NULL;
1051//XXX	struct lsa		*lsa;
1052	u_int8_t		 type = 0;
1053
1054	/* first check if we actually need to announce this route */
1055	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1056		return;
1057	/* never create summaries for as-ext LSA */
1058	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1059		return;
1060	/* no need for summary LSA in the originating area */
1061	if (rte->area.s_addr == area->id.s_addr)
1062		return;
1063	/* no need to originate inter-area routes to the backbone */
1064	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1065		return;
1066	/* TODO nexthop check, nexthop part of area -> no summary */
1067	if (rte->cost >= LS_INFINITY)
1068		return;
1069	/* TODO AS border router specific checks */
1070	/* TODO inter-area network route stuff */
1071	/* TODO intra-area stuff -- condense LSA ??? */
1072
1073	if (rte->d_type == DT_NET) {
1074		type = LSA_TYPE_SUM_NETWORK;
1075	} else if (rte->d_type == DT_RTR) {
1076		type = LSA_TYPE_SUM_ROUTER;
1077	} else
1078		fatalx("rde_summary_update: unknown route type");
1079
1080#if 0 /* XXX a lot todo */
1081	/* update lsa but only if it was changed */
1082	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1083	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1084	lsa_merge(rde_nbr_self(area), lsa, v);
1085
1086	if (v == NULL)
1087		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1088#endif
1089
1090	/* suppressed/deleted routes are not found in the second lsa_find */
1091	if (v)
1092		v->cost = rte->cost;
1093}
1094
1095
1096/*
1097 * functions for self-originated LSA
1098 */
1099struct lsa *
1100orig_asext_lsa(struct rroute *rr, u_int16_t age)
1101{
1102#if 0 /* XXX a lot todo */
1103	struct lsa	*lsa;
1104	u_int16_t	 len;
1105
1106	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
1107	if ((lsa = calloc(1, len)) == NULL)
1108		fatal("orig_asext_lsa");
1109
1110	log_debug("orig_asext_lsa: %s/%d age %d",
1111	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);
1112
1113	/* LSA header */
1114	lsa->hdr.age = htons(age);
1115	lsa->hdr.type = LSA_TYPE_EXTERNAL;
1116	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1117	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1118	lsa->hdr.len = htons(len);
1119
1120	/* prefix and mask */
1121	/*
1122	 * TODO ls_id must be unique, for overlapping routes this may
1123	 * not be true. In this case a hack needs to be done to
1124	 * make the ls_id unique.
1125	 */
1126	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
1127	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);
1128
1129	/*
1130	 * nexthop -- on connected routes we are the nexthop,
1131	 * on all other cases we announce the true nexthop.
1132	 * XXX this is wrong as the true nexthop may be outside
1133	 * of the ospf cloud and so unreachable. For now we force
1134	 * all traffic to be directed to us.
1135	 */
1136	lsa->data.asext.fw_addr = 0;
1137
1138	lsa->data.asext.metric = htonl(rr->metric);
1139	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);
1140
1141	lsa->hdr.ls_chksum = 0;
1142	lsa->hdr.ls_chksum =
1143	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1144
1145	return (lsa);
1146#endif
1147	return NULL;
1148}
1149
1150struct lsa *
1151orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1152{
1153#if 0 /* XXX a lot todo */
1154	struct lsa	*lsa;
1155	u_int16_t	 len;
1156
1157	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1158	if ((lsa = calloc(1, len)) == NULL)
1159		fatal("orig_sum_lsa");
1160
1161	/* LSA header */
1162	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1163	lsa->hdr.type = type;
1164	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1165	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1166	lsa->hdr.len = htons(len);
1167
1168	/* prefix and mask */
1169	/*
1170	 * TODO ls_id must be unique, for overlapping routes this may
1171	 * not be true. In this case a hack needs to be done to
1172	 * make the ls_id unique.
1173	 */
1174	lsa->hdr.ls_id = rte->prefix.s_addr;
1175	if (type == LSA_TYPE_SUM_NETWORK)
1176		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1177	else
1178		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1179
1180	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1181
1182	lsa->hdr.ls_chksum = 0;
1183	lsa->hdr.ls_chksum =
1184	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1185
1186	return (lsa);
1187#endif
1188	return NULL;
1189}
1190