/* rde.c, revision 1.1 */
1/*	$OpenBSD: rde.c,v 1.1 2007/10/08 10:44:51 norby Exp $ */
2
3/*
4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#include <sys/types.h>
22#include <sys/socket.h>
23#include <sys/queue.h>
24#include <netinet/in.h>
25#include <arpa/inet.h>
26#include <err.h>
27#include <errno.h>
28#include <stdlib.h>
29#include <signal.h>
30#include <string.h>
31#include <pwd.h>
32#include <unistd.h>
33#include <event.h>
34
35#include "ospf6.h"
36#include "ospf6d.h"
37#include "ospfe.h"
38#include "log.h"
39#include "rde.h"
40
/* internal dispatchers and shutdown helper */
void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);

/* control-socket summary replies */
void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
/* RDE-side neighbor table management */
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

/* per-neighbor list of outstanding LSA requests */
void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

/* AS-external LSA origination and withdrawal */
struct lsa	*rde_asext_get(struct rroute *);
struct lsa	*rde_asext_put(struct rroute *);

/* construction of self-originated LSAs */
struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;	/* running / pending config */
struct imsgbuf		*ibuf_ospfe;	/* pipe to the ospf engine process */
struct imsgbuf		*ibuf_main;	/* pipe to the parent process */
struct rde_nbr		*nbrself;	/* pseudo neighbor for self-originated LSAs */
struct lsa_tree		 asext_tree;	/* AS-external LSA database */
70
/* ARGSUSED */
/*
 * Signal callback: shut the RDE down on SIGINT/SIGTERM.
 * libevent delivers signals outside of real signal context, so the
 * usual async-signal-safety restrictions do not apply here.
 */
void
rde_sig_handler(int sig, short event, void *arg)
{
	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}
	fatalx("unexpected signal");
}
88
/* route decision engine */
/*
 * Fork the route decision engine child process.
 *
 * The child chroots to the unprivileged user's home directory, drops
 * privileges, wires up the imsg pipes to the parent and the ospf engine,
 * initializes the LSA database / SPF state and enters the libevent loop.
 * The parent gets the child's pid back; the child never returns normally.
 */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;		/* child continues below */
	default:
		return (pid);	/* parent returns right away */
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	/* confine the process before dropping privileges */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	/* drop privileges for good: groups first, then gid, then uid */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends belonging to the other processes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);

	/* setup event handler */
	ibuf_ospfe->events = EV_READ;
	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
	    ibuf_ospfe->handler, ibuf_ospfe);
	event_add(&ibuf_ospfe->ev, NULL);

	ibuf_main->events = EV_READ;
	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
	    ibuf_main->handler, ibuf_main);
	event_add(&ibuf_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* free the redistribute list; presumably only the parent needs it */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
182
/*
 * Tear down all RDE state and terminate the process; never returns.
 * Called from the signal handler and after the event loop exits.
 */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	/* release every area together with its LSA database */
	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	/* drop any queued imsgs and free the buffers to the other processes */
	msgbuf_clear(&ibuf_ospfe->w);
	free(ibuf_ospfe);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
207
208int
209rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
210    u_int16_t datalen)
211{
212	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
213}
214
215/* ARGSUSED */
216void
217rde_dispatch_imsg(int fd, short event, void *bula)
218{
219	struct imsgbuf		*ibuf = bula;
220	struct imsg		 imsg;
221	struct in_addr		 aid;
222	struct ls_req_hdr	 req_hdr;
223	struct lsa_hdr		 lsa_hdr, *db_hdr;
224	struct rde_nbr		 rn, *nbr;
225	struct timespec		 tp;
226	struct lsa		*lsa;
227	struct area		*area;
228	struct vertex		*v;
229	char			*buf;
230	ssize_t			 n;
231	time_t			 now;
232	int			 r, state, self, shut = 0;
233	u_int16_t		 l;
234
235	switch (event) {
236	case EV_READ:
237		if ((n = imsg_read(ibuf)) == -1)
238			fatal("imsg_read error");
239		if (n == 0)	/* connection closed */
240			shut = 1;
241		break;
242	case EV_WRITE:
243		if (msgbuf_write(&ibuf->w) == -1)
244			fatal("msgbuf_write");
245		imsg_event_add(ibuf);
246		return;
247	default:
248		fatalx("unknown event");
249	}
250
251	clock_gettime(CLOCK_MONOTONIC, &tp);
252	now = tp.tv_sec;
253
254	for (;;) {
255		if ((n = imsg_get(ibuf, &imsg)) == -1)
256			fatal("rde_dispatch_imsg: imsg_read error");
257		if (n == 0)
258			break;
259
260		switch (imsg.hdr.type) {
261		case IMSG_NEIGHBOR_UP:
262			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
263				fatalx("invalid size of OE request");
264			memcpy(&rn, imsg.data, sizeof(rn));
265
266			if (rde_nbr_find(imsg.hdr.peerid))
267				fatalx("rde_dispatch_imsg: "
268				    "neighbor already exists");
269			rde_nbr_new(imsg.hdr.peerid, &rn);
270			break;
271		case IMSG_NEIGHBOR_DOWN:
272			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
273			break;
274		case IMSG_NEIGHBOR_CHANGE:
275			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
276				fatalx("invalid size of OE request");
277			memcpy(&state, imsg.data, sizeof(state));
278
279			nbr = rde_nbr_find(imsg.hdr.peerid);
280			if (nbr == NULL)
281				break;
282
283			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
284			    state & NBR_STA_FULL))
285				area_track(nbr->area, state);
286
287			nbr->state = state;
288			if (nbr->state & NBR_STA_FULL)
289				rde_req_list_free(nbr);
290			break;
291		case IMSG_DB_SNAPSHOT:
292			nbr = rde_nbr_find(imsg.hdr.peerid);
293			if (nbr == NULL)
294				break;
295
296			lsa_snap(nbr->area, imsg.hdr.peerid);
297
298			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
299			    0, NULL, 0);
300			break;
301		case IMSG_DD:
302			nbr = rde_nbr_find(imsg.hdr.peerid);
303			if (nbr == NULL)
304				break;
305
306			buf = imsg.data;
307			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
308			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
309				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
310				buf += sizeof(lsa_hdr);
311
312				v = lsa_find(nbr->area, lsa_hdr.type,
313				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
314				if (v == NULL)
315					db_hdr = NULL;
316				else
317					db_hdr = &v->lsa->hdr;
318
319				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
320					/*
321					 * only request LSAs that are
322					 * newer or missing
323					 */
324					rde_req_list_add(nbr, &lsa_hdr);
325					imsg_compose(ibuf_ospfe, IMSG_DD,
326					    imsg.hdr.peerid, 0, &lsa_hdr,
327					    sizeof(lsa_hdr));
328				}
329			}
330			if (l != 0)
331				log_warnx("rde_dispatch_imsg: peerid %lu, "
332				    "trailing garbage in Database Description "
333				    "packet", imsg.hdr.peerid);
334
335			imsg_compose(ibuf_ospfe, IMSG_DD_END, imsg.hdr.peerid,
336			    0, NULL, 0);
337			break;
338		case IMSG_LS_REQ:
339			nbr = rde_nbr_find(imsg.hdr.peerid);
340			if (nbr == NULL)
341				break;
342
343			buf = imsg.data;
344			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
345			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
346				memcpy(&req_hdr, buf, sizeof(req_hdr));
347				buf += sizeof(req_hdr);
348
349				if ((v = lsa_find(nbr->area,
350				    ntohl(req_hdr.type), req_hdr.ls_id,
351				    req_hdr.adv_rtr)) == NULL) {
352					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
353					    imsg.hdr.peerid, 0, NULL, 0);
354					continue;
355				}
356				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
357				    imsg.hdr.peerid, 0, v->lsa,
358				    ntohs(v->lsa->hdr.len));
359			}
360			if (l != 0)
361				log_warnx("rde_dispatch_imsg: peerid %lu, "
362				    "trailing garbage in LS Request "
363				    "packet", imsg.hdr.peerid);
364			break;
365		case IMSG_LS_UPD:
366			nbr = rde_nbr_find(imsg.hdr.peerid);
367			if (nbr == NULL)
368				break;
369
370			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
371			if (lsa == NULL)
372				fatal(NULL);
373			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
374
375			if (!lsa_check(nbr, lsa,
376			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
377				free(lsa);
378				break;
379			}
380
381			v = lsa_find(nbr->area, lsa->hdr.type, lsa->hdr.ls_id,
382				    lsa->hdr.adv_rtr);
383			if (v == NULL)
384				db_hdr = NULL;
385			else
386				db_hdr = &v->lsa->hdr;
387
388			if (nbr->self) {
389				lsa_merge(nbr, lsa, v);
390				/* lsa_merge frees the right lsa */
391				break;
392			}
393
394			r = lsa_newer(&lsa->hdr, db_hdr);
395			if (r > 0) {
396				/* new LSA newer than DB */
397				if (v && v->flooded &&
398				    v->changed + MIN_LS_ARRIVAL >= now) {
399					free(lsa);
400					break;
401				}
402
403				rde_req_list_del(nbr, &lsa->hdr);
404
405				if (!(self = lsa_self(nbr, lsa, v)))
406					if (lsa_add(nbr, lsa))
407						/* delayed lsa */
408						break;
409
410				/* flood and perhaps ack LSA */
411				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
412				    imsg.hdr.peerid, 0, lsa,
413				    ntohs(lsa->hdr.len));
414
415				/* reflood self originated LSA */
416				if (self && v)
417					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
418					    v->peerid, 0, v->lsa,
419					    ntohs(v->lsa->hdr.len));
420				/* lsa not added so free it */
421				if (self)
422					free(lsa);
423			} else if (r < 0) {
424				/* lsa no longer needed */
425				free(lsa);
426
427				/*
428				 * point 6 of "The Flooding Procedure"
429				 * We are violating the RFC here because
430				 * it does not make sense to reset a session
431				 * because an equal LSA is already in the table.
432				 * Only if the LSA sent is older than the one
433				 * in the table we should reset the session.
434				 */
435				if (rde_req_list_exists(nbr, &lsa->hdr)) {
436					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
437					    imsg.hdr.peerid, 0, NULL, 0);
438					break;
439				}
440
441				/* new LSA older than DB */
442				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
443				    ntohs(db_hdr->age) == MAX_AGE)
444					/* seq-num wrap */
445					break;
446
447				if (v->changed + MIN_LS_ARRIVAL >= now)
448					break;
449
450				/* directly send current LSA, no ack */
451				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
452				    imsg.hdr.peerid, 0, v->lsa,
453				    ntohs(v->lsa->hdr.len));
454			} else {
455				/* LSA equal send direct ack */
456				imsg_compose(ibuf_ospfe, IMSG_LS_ACK,
457				    imsg.hdr.peerid, 0, &lsa->hdr,
458				    sizeof(lsa->hdr));
459				free(lsa);
460			}
461			break;
462		case IMSG_LS_MAXAGE:
463			nbr = rde_nbr_find(imsg.hdr.peerid);
464			if (nbr == NULL)
465				break;
466
467			if (imsg.hdr.len != IMSG_HEADER_SIZE +
468			    sizeof(struct lsa_hdr))
469				fatalx("invalid size of OE request");
470			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
471
472			if (rde_nbr_loading(nbr->area))
473				break;
474
475			v = lsa_find(nbr->area, lsa_hdr.type, lsa_hdr.ls_id,
476				    lsa_hdr.adv_rtr);
477			if (v == NULL)
478				db_hdr = NULL;
479			else
480				db_hdr = &v->lsa->hdr;
481
482			/*
483			 * only delete LSA if the one in the db is not newer
484			 */
485			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
486				lsa_del(nbr, &lsa_hdr);
487			break;
488		case IMSG_CTL_SHOW_DATABASE:
489		case IMSG_CTL_SHOW_DB_EXT:
490		case IMSG_CTL_SHOW_DB_NET:
491		case IMSG_CTL_SHOW_DB_RTR:
492		case IMSG_CTL_SHOW_DB_SELF:
493		case IMSG_CTL_SHOW_DB_SUM:
494		case IMSG_CTL_SHOW_DB_ASBR:
495			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
496			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
497				log_warnx("rde_dispatch: wrong imsg len");
498				break;
499			}
500			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
501				LIST_FOREACH(area, &rdeconf->area_list, entry) {
502					imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
503					    0, imsg.hdr.pid, area,
504					    sizeof(*area));
505					lsa_dump(&area->lsa_tree, imsg.hdr.type,
506					    imsg.hdr.pid);
507				}
508				lsa_dump(&asext_tree, imsg.hdr.type,
509				    imsg.hdr.pid);
510			} else {
511				memcpy(&aid, imsg.data, sizeof(aid));
512				if ((area = area_find(rdeconf, aid)) != NULL) {
513					imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
514					    0, imsg.hdr.pid, area,
515					    sizeof(*area));
516					lsa_dump(&area->lsa_tree, imsg.hdr.type,
517					    imsg.hdr.pid);
518					if (!area->stub)
519						lsa_dump(&asext_tree,
520						    imsg.hdr.type,
521						    imsg.hdr.pid);
522				}
523			}
524			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
525			    NULL, 0);
526			break;
527		case IMSG_CTL_SHOW_RIB:
528			LIST_FOREACH(area, &rdeconf->area_list, entry) {
529				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
530				    0, imsg.hdr.pid, area, sizeof(*area));
531
532				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
533				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
534			}
535			aid.s_addr = 0;
536			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
537
538			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
539			    NULL, 0);
540			break;
541		case IMSG_CTL_SHOW_SUM:
542			rde_send_summary(imsg.hdr.pid);
543			LIST_FOREACH(area, &rdeconf->area_list, entry)
544				rde_send_summary_area(area, imsg.hdr.pid);
545			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
546			    NULL, 0);
547			break;
548		default:
549			log_debug("rde_dispatch_msg: unexpected imsg %d",
550			    imsg.hdr.type);
551			break;
552		}
553		imsg_free(&imsg);
554	}
555	if (!shut)
556		imsg_event_add(ibuf);
557	else {
558		/* this pipe is dead, so remove the event handler */
559		event_del(&ibuf->ev);
560		event_loopexit(NULL);
561	}
562}
563
/* ARGSUSED */
/*
 * libevent callback for the pipe from the parent process.  Handles
 * kernel-route redistribution (NETWORK_ADD/DEL, KROUTE_GET) and the
 * staged reconfiguration messages (RECONF_*).
 *
 * Note: narea is static because IMSG_RECONF_IFACE messages refer to the
 * area received in an earlier IMSG_RECONF_AREA message.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct kroute		 kr;
	struct rroute		 rr;
	struct imsgbuf		*ibuf = bula;
	struct lsa		*lsa;
	struct vertex		*v;
	struct rt_node		*rn;
	ssize_t			 n;
	int			 shut = 0;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			/* originate (or refresh) an AS-external LSA */
			if ((lsa = rde_asext_get(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			/* withdraw the AS-external LSA via MAX_AGE */
			if ((lsa = rde_asext_put(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				/*
				 * if v == NULL no LSA is in the table and
				 * nothing has to be done.
				 */
				if (v)
					lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_KROUTE_GET:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rn = rt_find(kr.prefix.s_addr, kr.prefixlen,
			    DT_NET)) != NULL)
				rde_send_change_kroute(rn);
			else
				/* should not happen */
				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
				    0, &kr, sizeof(kr));
			break;
		case IMSG_RECONF_CONF:
			/* start of a reconfiguration: stage into nconf */
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);

			/* attach to the most recently received area */
			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			/* commit the staged config into the running one */
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}
702
703u_int32_t
704rde_router_id(void)
705{
706	return (rdeconf->rtr_id.s_addr);
707}
708
709void
710rde_send_change_kroute(struct rt_node *r)
711{
712	struct kroute		 kr;
713	struct rt_nexthop	*rn;
714
715	TAILQ_FOREACH(rn, &r->nexthop, entry) {
716		if (!rn->invalid)
717			break;
718	}
719	if (!rn)
720		fatalx("rde_send_change_kroute: no valid nexthop found");
721
722	bzero(&kr, sizeof(kr));
723	kr.prefix.s_addr = r->prefix.s_addr;
724	kr.nexthop.s_addr = rn->nexthop.s_addr;
725	kr.prefixlen = r->prefixlen;
726	kr.ext_tag = r->ext_tag;
727
728	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
729}
730
731void
732rde_send_delete_kroute(struct rt_node *r)
733{
734	struct kroute	 kr;
735
736	bzero(&kr, sizeof(kr));
737	kr.prefix.s_addr = r->prefix.s_addr;
738	kr.prefixlen = r->prefixlen;
739
740	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
741}
742
743void
744rde_send_summary(pid_t pid)
745{
746	static struct ctl_sum	 sumctl;
747	struct timeval		 now;
748	struct area		*area;
749	struct vertex		*v;
750
751	bzero(&sumctl, sizeof(struct ctl_sum));
752
753	sumctl.rtr_id.s_addr = rde_router_id();
754	sumctl.spf_delay = rdeconf->spf_delay;
755	sumctl.spf_hold_time = rdeconf->spf_hold_time;
756
757	LIST_FOREACH(area, &rdeconf->area_list, entry)
758		sumctl.num_area++;
759
760	RB_FOREACH(v, lsa_tree, &asext_tree)
761		sumctl.num_ext_lsa++;
762
763	gettimeofday(&now, NULL);
764	if (rdeconf->uptime < now.tv_sec)
765		sumctl.uptime = now.tv_sec - rdeconf->uptime;
766	else
767		sumctl.uptime = 0;
768
769	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
770	    sizeof(sumctl));
771}
772
773void
774rde_send_summary_area(struct area *area, pid_t pid)
775{
776	static struct ctl_sum_area	 sumareactl;
777	struct iface			*iface;
778	struct rde_nbr			*nbr;
779	struct lsa_tree			*tree = &area->lsa_tree;
780	struct vertex			*v;
781
782	bzero(&sumareactl, sizeof(struct ctl_sum_area));
783
784	sumareactl.area.s_addr = area->id.s_addr;
785	sumareactl.num_spf_calc = area->num_spf_calc;
786
787	LIST_FOREACH(iface, &area->iface_list, entry)
788		sumareactl.num_iface++;
789
790	LIST_FOREACH(nbr, &area->nbr_list, entry)
791		if (nbr->state == NBR_STA_FULL && !nbr->self)
792			sumareactl.num_adj_nbr++;
793
794	RB_FOREACH(v, lsa_tree, tree)
795		sumareactl.num_lsa++;
796
797	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
798	    sizeof(sumareactl));
799}
800
/* hash table over RDE neighbors, keyed by imsg peerid */
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;	/* table size - 1 (power of two) */
} rdenbrtable;

/* bucket for a given peerid */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
810
811void
812rde_nbr_init(u_int32_t hashsize)
813{
814	struct rde_nbr_head	*head;
815	u_int32_t		 hs, i;
816
817	for (hs = 1; hs < hashsize; hs <<= 1)
818		;
819	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
820	if (rdenbrtable.hashtbl == NULL)
821		fatal("rde_nbr_init");
822
823	for (i = 0; i < hs; i++)
824		LIST_INIT(&rdenbrtable.hashtbl[i]);
825
826	rdenbrtable.hashmask = hs - 1;
827
828	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
829		fatal("rde_nbr_init");
830
831	nbrself->id.s_addr = rde_router_id();
832	nbrself->peerid = NBR_IDSELF;
833	nbrself->state = NBR_STA_DOWN;
834	nbrself->self = 1;
835	head = RDE_NBR_HASH(NBR_IDSELF);
836	LIST_INSERT_HEAD(head, nbrself, hash);
837}
838
839void
840rde_nbr_free(void)
841{
842	free(nbrself);
843	free(rdenbrtable.hashtbl);
844}
845
846struct rde_nbr *
847rde_nbr_find(u_int32_t peerid)
848{
849	struct rde_nbr_head	*head;
850	struct rde_nbr		*nbr;
851
852	head = RDE_NBR_HASH(peerid);
853
854	LIST_FOREACH(nbr, head, hash) {
855		if (nbr->peerid == peerid)
856			return (nbr);
857	}
858
859	return (NULL);
860}
861
862struct rde_nbr *
863rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
864{
865	struct rde_nbr_head	*head;
866	struct rde_nbr		*nbr;
867	struct area		*area;
868
869	if (rde_nbr_find(peerid))
870		return (NULL);
871	if ((area = area_find(rdeconf, new->area_id)) == NULL)
872		fatalx("rde_nbr_new: unknown area");
873
874	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
875		fatal("rde_nbr_new");
876
877	memcpy(nbr, new, sizeof(*nbr));
878	nbr->peerid = peerid;
879	nbr->area = area;
880
881	TAILQ_INIT(&nbr->req_list);
882
883	head = RDE_NBR_HASH(peerid);
884	LIST_INSERT_HEAD(head, nbr, hash);
885	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
886
887	return (nbr);
888}
889
890void
891rde_nbr_del(struct rde_nbr *nbr)
892{
893	if (nbr == NULL)
894		return;
895
896	rde_req_list_free(nbr);
897
898	LIST_REMOVE(nbr, entry);
899	LIST_REMOVE(nbr, hash);
900
901	free(nbr);
902}
903
904int
905rde_nbr_loading(struct area *area)
906{
907	struct rde_nbr		*nbr;
908	int			 checkall = 0;
909
910	if (area == NULL) {
911		area = LIST_FIRST(&rdeconf->area_list);
912		checkall = 1;
913	}
914
915	while (area != NULL) {
916		LIST_FOREACH(nbr, &area->nbr_list, entry) {
917			if (nbr->self)
918				continue;
919			if (nbr->state & NBR_STA_XCHNG ||
920			    nbr->state & NBR_STA_LOAD)
921				return (1);
922		}
923		if (!checkall)
924			break;
925		area = LIST_NEXT(area, entry);
926	}
927
928	return (0);
929}
930
931struct rde_nbr *
932rde_nbr_self(struct area *area)
933{
934	struct rde_nbr		*nbr;
935
936	LIST_FOREACH(nbr, &area->nbr_list, entry)
937		if (nbr->self)
938			return (nbr);
939
940	/* this may not happen */
941	fatalx("rde_nbr_self: area without self");
942	return (NULL);
943}
944
945/*
946 * LSA req list
947 */
948void
949rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
950{
951	struct rde_req_entry	*le;
952
953	if ((le = calloc(1, sizeof(*le))) == NULL)
954		fatal("rde_req_list_add");
955
956	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
957	le->type = lsa->type;
958	le->ls_id = lsa->ls_id;
959	le->adv_rtr = lsa->adv_rtr;
960}
961
962int
963rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
964{
965	struct rde_req_entry	*le;
966
967	TAILQ_FOREACH(le, &nbr->req_list, entry) {
968		if ((lsa_hdr->type == le->type) &&
969		    (lsa_hdr->ls_id == le->ls_id) &&
970		    (lsa_hdr->adv_rtr == le->adv_rtr))
971			return (1);
972	}
973	return (0);
974}
975
976void
977rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
978{
979	struct rde_req_entry	*le;
980
981	TAILQ_FOREACH(le, &nbr->req_list, entry) {
982		if ((lsa_hdr->type == le->type) &&
983		    (lsa_hdr->ls_id == le->ls_id) &&
984		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
985			TAILQ_REMOVE(&nbr->req_list, le, entry);
986			free(le);
987			return;
988		}
989	}
990}
991
992void
993rde_req_list_free(struct rde_nbr *nbr)
994{
995	struct rde_req_entry	*le;
996
997	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
998		TAILQ_REMOVE(&nbr->req_list, le, entry);
999		free(le);
1000	}
1001}
1002
/*
 * as-external LSA handling
 */
/*
 * Build an AS-external LSA announcing the redistributed route rr.
 * The duplicate-with-net-LSA check below is disabled (#if 0) in this
 * revision; the returned LSA is always a fresh origination at
 * DEFAULT_AGE and is handed to lsa_merge() by the caller.
 */
struct lsa *
rde_asext_get(struct rroute *rr)
{
#if 0
	struct area	*area;
	struct iface	*iface;
XXX
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
			    prefixlen2mask(rr->kr.prefixlen)) {
				/* already announced as (stub) net LSA */
				log_debug("rde_asext_get: %s/%d is net LSA",
				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
				return (NULL);
			}
		}
#endif
	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}
1028
1029struct lsa *
1030rde_asext_put(struct rroute *rr)
1031{
1032	/*
1033	 * just try to remove the LSA. If the prefix is announced as
1034	 * stub net LSA lsa_find() will fail later and nothing will happen.
1035	 */
1036
1037	/* remove by reflooding with MAX_AGE */
1038	return (orig_asext_lsa(rr, MAX_AGE));
1039}
1040
/*
 * summary LSA stuff
 */
/*
 * (Re-)originate the summary LSA describing route rte into the given
 * area, or do nothing if one of the suppression rules below applies.
 * The sequence of early returns mirrors the origination rules for
 * summary LSAs, so their order matters.
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* TODO nexthop check, nexthop part of area -> no summary */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	/* pick the summary LSA type matching the route's destination type */
	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	/* lsa_merge may have installed a new vertex; look it up again */
	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}
1089
1090
/*
 * functions for self-originated LSA
 */
/*
 * Allocate and fill in an AS-external LSA for the redistributed route
 * rr with the given age (DEFAULT_AGE to announce, MAX_AGE to flush).
 * The caller owns the returned LSA; checksum is computed last, over the
 * finished LSA.
 */
struct lsa *
orig_asext_lsa(struct rroute *rr, u_int16_t age)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we announce the true nexthop.
	 * XXX this is wrong as the true nexthop may be outside
	 * of the ospf cloud and so unreachable. For now we force
	 * all traffic to be directed to us.
	 */
	lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(rr->metric);
	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);

	/* checksum over the whole LSA, with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1141
1142struct lsa *
1143orig_sum_lsa(struct rt_node *rte, u_int8_t type, int invalid)
1144{
1145	struct lsa	*lsa;
1146	u_int16_t	 len;
1147
1148	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1149	if ((lsa = calloc(1, len)) == NULL)
1150		fatal("orig_sum_lsa");
1151
1152	/* LSA header */
1153	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1154	lsa->hdr.type = type;
1155	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1156	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1157	lsa->hdr.len = htons(len);
1158
1159	/* prefix and mask */
1160	/*
1161	 * TODO ls_id must be unique, for overlapping routes this may
1162	 * not be true. In this case a hack needs to be done to
1163	 * make the ls_id unique.
1164	 */
1165	lsa->hdr.ls_id = rte->prefix.s_addr;
1166	if (type == LSA_TYPE_SUM_NETWORK)
1167		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1168	else
1169		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1170
1171	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1172
1173	lsa->hdr.ls_chksum = 0;
1174	lsa->hdr.ls_chksum =
1175	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1176
1177	return (lsa);
1178}
1179