/*	$OpenBSD: rde.c,v 1.112 2023/03/08 04:43:14 guenther Exp $ */

/*
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf.h"
#include "ospfd.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int, short, void *);
__dead void	 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct iface	*rde_asext_lookup(u_int32_t, int);
void		 rde_asext_get(struct kroute *);
void		 rde_asext_put(struct kroute *);
void		 rde_asext_free(void);
struct lsa	*orig_asext_lsa(struct kroute *, u_int32_t, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);

struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
static struct imsgev	*iev_ospfe;
static struct imsgev	*iev_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;

void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct area		*area;
	struct iface		*iface;
	struct passwd		*pw;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	/* cleanup a bit */
	kif_clear();

	rdeconf = xconf;

	if ((pw = getpwnam(OSPFD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	/*
	 * XXX needed with fork+exec
	 * log_init(debug, LOG_DAEMON);
	 * log_setverbose(verbose);
	 */

	ospfd_process = PROC_RDE_ENGINE;
	log_procinit(log_procnames[ospfd_process]);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	if (pledge("stdio", NULL) == -1)
		fatal("pledge");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((iev_ospfe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ospfe->ibuf, pipe_ospfe2rde[1]);
	iev_ospfe->handler = rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ospfe->events = EV_READ;
	event_set(&iev_ospfe->ev, iev_ospfe->ibuf.fd, iev_ospfe->events,
	    iev_ospfe->handler, iev_ospfe);
	event_add(&iev_ospfe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* remove unneeded stuff from config */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry)
			md_list_clr(&iface->auth_md_list);

	conf_clear_redist_list(&rdeconf->redist_list);

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

__dead void
rde_shutdown(void)
{
	struct area	*a;
	struct vertex	*v, *nv;

	/* close pipes */
	msgbuf_clear(&iev_ospfe->ibuf.w);
	close(iev_ospfe->ibuf.fd);
	msgbuf_clear(&iev_main->ibuf.w);
	close(iev_main->ibuf.fd);

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	for (v = RB_MIN(lsa_tree, &asext_tree); v != NULL; v = nv) {
		nv = RB_NEXT(lsa_tree, &asext_tree, v);
		vertex_free(v);
	}
	rde_asext_free();
	rde_nbr_free();

	free(iev_ospfe);
	free(iev_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose_event(iev_ospfe, type, peerid, pid, -1,
	    data, datalen));
}

void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct in_addr		 addr;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, error, shut = 0, verbose;
	u_int16_t		 l;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;
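	/*
	 * now comes from the monotonic clock; the MIN_LS_ARRIVAL
	 * checks below compare LSA arrival times against it, so they
	 * must not be affected by wall-clock adjustments.
	 */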

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_ADDR:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(addr))
				fatalx("invalid size of OE request");
			memcpy(&addr, imsg.data, sizeof(addr));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->addr.s_addr = addr.s_addr;
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_NEIGHBOR_CAPA:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(u_int8_t))
				fatalx("invalid size of OE request");
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;
			nbr->capa_options = *(u_int8_t *)imsg.data;
			break;
		case IMSG_AREA_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");

			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				if (area->id.s_addr == imsg.hdr.peerid)
					break;
			}
			if (area == NULL)
				break;
			memcpy(&state, imsg.data, sizeof(state));
			area->active = state;
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr);

			imsg_compose_event(iev_ospfe, IMSG_DB_END,
			    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			error = 0;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

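				/* no AS-external LSAs in stub areas */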
				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
				    nbr->area->stub) {
					error = 1;
					break;
				}
				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose_event(iev_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, -1, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			if (l != 0 && !error)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			if (!error)
				imsg_compose_event(iev_ospfe, IMSG_DD_END,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			else
				imsg_compose_event(iev_ospfe, IMSG_DD_BADLSA,
				    imsg.hdr.peerid, 0, -1, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				if ((v = lsa_find(nbr->iface,
				    ntohl(req_hdr.type), req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					log_debug("rde_dispatch_imsg: "
					    "requested LSA not found");
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					continue;
				}
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %u, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */
				break;
			}

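			/*
			 * lsa_newer() returns >0 if the received LSA is
			 * newer than the copy in the DB, <0 if it is
			 * older and 0 if both are equal.
			 */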
			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose_event(iev_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, -1, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_FLOOD, v->peerid, 0, -1,
					    v->lsa, ntohs(v->lsa->hdr.len));
				/* new LSA was not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/*
				 * point 6 of "The Flooding Procedure"
				 * We deviate from the RFC here: it makes no
				 * sense to reset a session just because an
				 * equal LSA is already in the table.  Only
				 * if the LSA sent is older than the one in
				 * the table should the session be reset.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose_event(iev_ospfe,
					    IMSG_LS_BADREQ, imsg.hdr.peerid,
					    0, -1, NULL, 0);
					free(lsa);
					break;
				}

				/* lsa no longer needed */
				free(lsa);

				/* new LSA older than DB */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, -1, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose_event(iev_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, -1, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

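			/*
			 * MaxAge LSAs may only be flushed once no neighbor
			 * is still in Exchange or Loading state
			 * (RFC 2328, 14.1).
			 */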
			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
		case IMSG_CTL_SHOW_DB_OPAQ:
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose_event(iev_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, -1, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose_event(iev_ospfe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);
			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ospfe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_setverbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

void
rde_dispatch_parent(int fd, short event, void *bula)
{
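	/*
	 * narea is static: IMSG_RECONF_REDIST and IMSG_RECONF_IFACE
	 * attach to the area received in the last IMSG_RECONF_AREA,
	 * possibly during an earlier invocation of this handler.
	 */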
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct kroute		 rr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	ibuf = &iev->ibuf;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_get(&rr);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));
			rde_asext_put(&rr);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred = malloc(sizeof(struct redistribute))) ==
			    NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);
			RB_INIT(&niface->lsa_tree);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}

void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose_event(iev_ospfe, IMSG_CTL_AREA, 0, pid, -1,
	    area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose_event(iev_ospfe, IMSG_CTL_IFACE,
		    0, pid, -1, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

struct area *
rde_backbone_area(void)
{
	struct in_addr	id;

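	/* the backbone is the area with ID 0.0.0.0 */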
	id.s_addr = INADDR_ANY;

	return (area_find(rdeconf, id));
}

void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct ibuf		*wbuf;

	if ((wbuf = imsg_create(&iev_main->ibuf, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

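	/* collect all valid, non-connected nexthops into a single imsg */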
	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		if (rn->connected)
			/* skip self-originated routes */
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	if (krcount == 0) {
		/* no valid nexthop or self originated, so remove */
		ibuf_free(wbuf);
		rde_send_delete_kroute(r);
		return;
	}
	imsg_close(&iev_main->ibuf, wbuf);
	imsg_event_add(iev_main);
}

void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.prefixlen = r->prefixlen;

	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
	    &kr, sizeof(kr));
}

void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

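	/*
	 * The running sum of LSA checksums serves as a cheap fingerprint
	 * of the LSA database, handy for comparing databases between
	 * routers.
	 */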
	RB_FOREACH(v, lsa_tree, &asext_tree) {
		sumctl.num_ext_lsa++;
		sumctl.ext_lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree) {
		sumareactl.num_lsa++;
		sumareactl.lsa_cksum += ntohs(v->lsa->hdr.ls_chksum);
	}

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

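	/*
	 * Round the requested size up to the next power of two so that
	 * the bit mask in RDE_NBR_HASH() selects a valid bucket, e.g. a
	 * hashsize of 100 results in 128 buckets and a hashmask of 0x7f.
	 */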
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

void
rde_nbr_iface_del(struct iface *iface)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr, *xnbr;
	u_int32_t		 i;

	for (i = 0; i <= rdenbrtable.hashmask; i++) {
		head = &rdenbrtable.hashtbl[i];
		LIST_FOREACH_SAFE(nbr, head, hash, xnbr) {
			if (nbr->iface == iface)
				rde_nbr_del(nbr);
		}
	}
}

void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr		*nbr;
	int			 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr		*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this should never happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
struct asext_node {
	RB_ENTRY(asext_node)	entry;
	struct kroute		r;
	u_int32_t		ls_id;
};

static __inline int	asext_compare(struct asext_node *, struct asext_node *);
struct asext_node	*asext_find(u_int32_t, u_int8_t);

RB_HEAD(asext_tree, asext_node)		ast;
RB_PROTOTYPE(asext_tree, asext_node, entry, asext_compare)
RB_GENERATE(asext_tree, asext_node, entry, asext_compare)

static __inline int
asext_compare(struct asext_node *a, struct asext_node *b)
{
	if (ntohl(a->r.prefix.s_addr) < ntohl(b->r.prefix.s_addr))
		return (-1);
	if (ntohl(a->r.prefix.s_addr) > ntohl(b->r.prefix.s_addr))
		return (1);
	if (a->r.prefixlen < b->r.prefixlen)
		return (-1);
	if (a->r.prefixlen > b->r.prefixlen)
		return (1);
	return (0);
}

struct asext_node *
asext_find(u_int32_t addr, u_int8_t prefixlen)
{
	struct asext_node	a;

	a.r.prefix.s_addr = addr;
	a.r.prefixlen = prefixlen;

	return (RB_FIND(asext_tree, &ast, &a));
}

struct iface *
rde_asext_lookup(u_int32_t prefix, int plen)
{
	struct area	*area;
	struct iface	*iface;

	LIST_FOREACH(area, &rdeconf->area_list, entry) {
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    (prefix & iface->mask.s_addr) && (plen == -1 ||
			    iface->mask.s_addr == prefixlen2mask(plen)))
				return (iface);
		}
	}
	return (NULL);
}

void
rde_asext_get(struct kroute *kr)
{
	struct asext_node	*an, *oan;
	struct vertex		*v;
	struct lsa		*lsa;
	u_int32_t		 mask;

	if (rde_asext_lookup(kr->prefix.s_addr, kr->prefixlen)) {
		/* already announced as (stub) net LSA */
		log_debug("rde_asext_get: %s/%d is net LSA",
		    inet_ntoa(kr->prefix), kr->prefixlen);
		return;
	}

	an = asext_find(kr->prefix.s_addr, kr->prefixlen);
	if (an == NULL) {
		if ((an = calloc(1, sizeof(*an))) == NULL)
			fatal("rde_asext_get");
		bcopy(kr, &an->r, sizeof(*kr));
		an->ls_id = kr->prefix.s_addr;
		RB_INSERT(asext_tree, &ast, an);
	} else {
		/* the bcopy does not change the lookup key, so it is safe */
		bcopy(kr, &an->r, sizeof(*kr));
	}

	/*
	 * The ls_id must be unique; for overlapping routes this may not
	 * hold, and a unique ls_id needs to be found.  The algorithm
	 * changes the ls_id of the less specific route.  E.g. with
	 * 10.0.0.0/16 and 10.0.0.0/24, 10.0.0.0/24 gets the 10.0.0.0
	 * ls_id and 10.0.0.0/16 changes its ls_id to 10.0.255.255 and
	 * checks if that is unique.
	 */
	oan = an;
	mask = prefixlen2mask(oan->r.prefixlen);
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
	    rdeconf->rtr_id.s_addr);
	while (v && v->lsa->data.asext.mask != mask) {
		/* conflict needs to be resolved. change less specific lsa */
		if (ntohl(v->lsa->data.asext.mask) < ntohl(mask)) {
			/* lsa to insert is more specific, fix other lsa */
			mask = v->lsa->data.asext.mask;
			oan = asext_find(v->lsa->hdr.ls_id & mask,
			    mask2prefixlen(mask));
			if (oan == NULL)
				fatalx("as-ext LSA DB corrupted");
		}
		/* oan is less specific and needs new ls_id */
		if (oan->ls_id == oan->r.prefix.s_addr)
			oan->ls_id |= ~mask;
		else {
			u_int32_t	tmp = ntohl(oan->ls_id);
			oan->ls_id = htonl(tmp - 1);
			if (oan->ls_id == oan->r.prefix.s_addr) {
				log_warnx("prefix %s/%d cannot be "
				    "redistributed, no unique ls_id found.",
				    inet_ntoa(kr->prefix), kr->prefixlen);
				RB_REMOVE(asext_tree, &ast, an);
				free(an);
				return;
			}
		}
		mask = prefixlen2mask(oan->r.prefixlen);
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
	}

	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	lsa = orig_asext_lsa(kr, an->ls_id, DEFAULT_AGE);
	lsa_merge(nbrself, lsa, v);

	if (oan != an) {
		v = lsa_find(NULL, LSA_TYPE_EXTERNAL, oan->ls_id,
		    rdeconf->rtr_id.s_addr);
		lsa = orig_asext_lsa(&oan->r, oan->ls_id, DEFAULT_AGE);
		lsa_merge(nbrself, lsa, v);
	}
}

void
rde_asext_put(struct kroute *kr)
{
	struct asext_node	*an;
	struct vertex		*v;
	struct lsa		*lsa;

	/*
	 * Just try to remove the LSA. If the prefix is announced as a
	 * stub net LSA, asext_find() will fail and nothing will happen.
	 */
	an = asext_find(kr->prefix.s_addr, kr->prefixlen);
	if (an == NULL) {
		log_debug("rde_asext_put: NO SUCH LSA %s/%d",
		    inet_ntoa(kr->prefix), kr->prefixlen);
		return;
	}

	/*
	 * inherit metric and ext_tag from the current LSA;
	 * some routers don't like to get withdraws that are
	 * different from what they have in their table.
	 */
	v = lsa_find(NULL, LSA_TYPE_EXTERNAL, an->ls_id,
	    rdeconf->rtr_id.s_addr);
	if (v != NULL) {
		kr->metric = ntohl(v->lsa->data.asext.metric);
		kr->ext_tag = ntohl(v->lsa->data.asext.ext_tag);
	}

	/* remove by reflooding with MAX_AGE */
	lsa = orig_asext_lsa(kr, an->ls_id, MAX_AGE);
	lsa_merge(nbrself, lsa, v);

	RB_REMOVE(asext_tree, &ast, an);
	free(an);
}

void
rde_asext_free(void)
{
	struct asext_node	*an, *nan;

	for (an = RB_MIN(asext_tree, &ast); an != NULL; an = nan) {
		nan = RB_NEXT(asext_tree, &ast, an);
		RB_REMOVE(asext_tree, &ast, an);
		free(an);
	}
}

struct lsa *
orig_asext_lsa(struct kroute *kr, u_int32_t ls_id, u_int16_t age)
{
	struct lsa	*lsa;
	struct iface	*iface;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(kr->prefix), kr->prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	/* update of seqnum is done by lsa_merge */
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	lsa->hdr.ls_id = ls_id;
	lsa->data.asext.mask = prefixlen2mask(kr->prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop;
	 * in other cases we may announce the true nexthop if it is
	 * reachable via an OSPF enabled interface, but only broadcast
	 * and NBMA interfaces are considered in that case.
	 * It does not make sense to announce the nexthop of a
	 * point-to-point link since the traffic has to go through this
	 * box anyway.  Some implementations actually check that there
	 * are multiple neighbors on the particular segment; we skip
	 * that check.
	 */
	iface = rde_asext_lookup(kr->nexthop.s_addr, -1);
	if (kr->flags & F_CONNECTED)
		lsa->data.asext.fw_addr = 0;
	else if (iface && (iface->type == IF_TYPE_BROADCAST ||
	    iface->type == IF_TYPE_NBMA))
		lsa->data.asext.fw_addr = kr->nexthop.s_addr;
	else
		lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(kr->metric);
	lsa->data.asext.ext_tag = htonl(kr->ext_tag);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}

/*
 * summary LSA stuff
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* route is invalid, lsa_remove_invalid_sums() will do the cleanup */
	if (rte->cost >= LS_INFINITY)
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		if (rn->invalid)
			continue;
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		break;
	}
	if (rn == NULL)
		/* all nexthops belong to this area or are invalid */
		return;

	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find_area(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find_area(area, type, rte->prefix.s_addr,
		    rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}

struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}