/* rde.c revision 1.16 */
1/* $OpenBSD: rde.c,v 1.16 2009/01/28 22:47:36 stsp Exp $ */ 2 3/* 4 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org> 5 * Copyright (c) 2004 Esben Norby <norby@openbsd.org> 6 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "ospfe.h"
#include "log.h"
#include "rde.h"

void		 rde_sig_handler(int sig, short, void *);
void		 rde_shutdown(void);
void		 rde_dispatch_imsg(int, short, void *);
void		 rde_dispatch_parent(int, short, void *);
void		 rde_dump_area(struct area *, int, pid_t);

void		 rde_send_summary(pid_t);
void		 rde_send_summary_area(struct area *, pid_t);
void		 rde_nbr_init(u_int32_t);
void		 rde_nbr_free(void);
struct rde_nbr	*rde_nbr_find(u_int32_t);
struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
void		 rde_nbr_del(struct rde_nbr *);

void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
void		 rde_req_list_free(struct rde_nbr *);

struct lsa	*rde_asext_get(struct rroute *);
struct lsa	*rde_asext_put(struct rroute *);

struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
struct lsa	*orig_intra_lsa_net(struct area *, struct iface *);

/* Tree of prefixes with global scope on given a link,
 * see orig_intra_lsa_*() */
struct prefix_node {
	RB_ENTRY(prefix_node)	 entry;
	struct lsa_prefix	*prefix;	/* points into a link LSA body, not owned */
};
RB_HEAD(prefix_tree, prefix_node);
RB_PROTOTYPE(prefix_tree, prefix_node, entry, prefix_compare);
int		 prefix_compare(struct prefix_node *, struct prefix_node *);
void		 prefix_tree_add_net(struct prefix_tree *, struct lsa_link *);
void		 append_prefix_lsas(struct lsa **, u_int16_t *, u_int16_t *,
		    struct prefix_tree *);

/* global RDE state: running config, pipes to the other processes, self-nbr */
struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
struct imsgbuf		*ibuf_ospfe;
struct imsgbuf		*ibuf_main;
struct rde_nbr		*nbrself;
struct lsa_tree		 asext_tree;	/* AS-external LSAs (AS flooding scope) */

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
/*
 * Fork the RDE child process: chroot to the unprivileged user's home,
 * drop privileges, wire up the imsg pipes to the parent and to the
 * ospfe process, and enter the libevent loop.  Returns the child's
 * pid in the parent; the child never returns (exits via rde_shutdown()).
 */
pid_t
rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
    int pipe_parent2ospfe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct timeval		 now;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ospfd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();
	rde_nbr_init(NBR_HASHSIZE);
	lsa_init(&asext_tree);

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends this process does not use */
	close(pipe_ospfe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ospfe[0]);
	close(pipe_parent2ospfe[1]);

	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);

	/* setup event handler */
	ibuf_ospfe->events = EV_READ;
	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
	    ibuf_ospfe->handler, ibuf_ospfe);
	event_add(&ibuf_ospfe->ev, NULL);

	ibuf_main->events = EV_READ;
	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
	    ibuf_main->handler, ibuf_main);
	event_add(&ibuf_main->ev, NULL);

	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
	cand_list_init();
	rt_init();

	/* the redistribute list is only needed in the parent process */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	gettimeofday(&now, NULL);
	rdeconf->uptime = now.tv_sec;

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

/* Tear down all RDE state and exit the process; never returns. */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	msgbuf_clear(&ibuf_ospfe->w);
	free(ibuf_ospfe);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

/* Convenience wrapper: send an imsg to the ospfe process. */
int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
}

/* ARGSUSED */
/*
 * Event handler for the pipe from ospfe: neighbor state changes, database
 * exchange (DD/LS_REQ/LS_UPD/LS_MAXAGE) and the ctl "show" requests.
 * This implements the receiving side of the OSPF flooding procedure.
 */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgbuf		*ibuf = bula;
	struct imsg		 imsg;
	struct in_addr		 aid;
	struct ls_req_hdr	 req_hdr;
	struct lsa_hdr		 lsa_hdr, *db_hdr;
	struct rde_nbr		 rn, *nbr;
	struct timespec		 tp;
	struct lsa		*lsa;
	struct area		*area;
	struct vertex		*v;
	char			*buf;
	ssize_t			 n;
	time_t			 now;
	int			 r, state, self, shut = 0;
	u_int16_t		 l;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	/* monotonic time, used for the MIN_LS_ARRIVAL rate limit below */
	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NEIGHBOR_UP:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
				fatalx("invalid size of OE request");
			memcpy(&rn, imsg.data, sizeof(rn));

			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
				fatalx("rde_dispatch_imsg: "
				    "neighbor already exists");
			break;
		case IMSG_NEIGHBOR_DOWN:
			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
			break;
		case IMSG_NEIGHBOR_CHANGE:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
				fatalx("invalid size of OE request");
			memcpy(&state, imsg.data, sizeof(state));

			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* track transitions into/out of FULL for the area */
			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
			    state & NBR_STA_FULL))
				area_track(nbr->area, state);

			nbr->state = state;
			if (nbr->state & NBR_STA_FULL)
				rde_req_list_free(nbr);
			break;
		case IMSG_DB_SNAPSHOT:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa_snap(nbr, imsg.hdr.peerid);

			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
			    0, NULL, 0);
			break;
		case IMSG_DD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			/* walk the list of LSA headers in the DD packet */
			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
				buf += sizeof(lsa_hdr);

				v = lsa_find(nbr->iface, lsa_hdr.type,
				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
				if (v == NULL)
					db_hdr = NULL;
				else
					db_hdr = &v->lsa->hdr;

				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
					/*
					 * only request LSAs that are
					 * newer or missing
					 */
					rde_req_list_add(nbr, &lsa_hdr);
					imsg_compose(ibuf_ospfe, IMSG_DD,
					    imsg.hdr.peerid, 0, &lsa_hdr,
					    sizeof(lsa_hdr));
				}
			}
			/* NOTE(review): peerid is u_int32_t but printed with
			 * %lu -- mismatched on ILP32; verify format. */
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %lu, "
				    "trailing garbage in Database Description "
				    "packet", imsg.hdr.peerid);

			imsg_compose(ibuf_ospfe, IMSG_DD_END, imsg.hdr.peerid,
			    0, NULL, 0);
			break;
		case IMSG_LS_REQ:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			buf = imsg.data;
			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
				memcpy(&req_hdr, buf, sizeof(req_hdr));
				buf += sizeof(req_hdr);

				/* requesting an LSA we do not have is a
				 * protocol error -> BadLSReq */
				if ((v = lsa_find(nbr->iface,
				    req_hdr.type, req_hdr.ls_id,
				    req_hdr.adv_rtr)) == NULL) {
					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
					    imsg.hdr.peerid, 0, NULL, 0);
					continue;
				}
				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, v->lsa,
				    ntohs(v->lsa->hdr.len));
			}
			if (l != 0)
				log_warnx("rde_dispatch_imsg: peerid %lu, "
				    "trailing garbage in LS Request "
				    "packet", imsg.hdr.peerid);
			break;
		case IMSG_LS_UPD:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
			if (lsa == NULL)
				fatal(NULL);
			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);

			if (!lsa_check(nbr, lsa,
			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
				free(lsa);
				break;
			}

			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
			    lsa->hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			if (nbr->self) {
				lsa_merge(nbr, lsa, v);
				/* lsa_merge frees the right lsa */

				/* NOTE(review): lsa->hdr.type is read here
				 * after lsa_merge() may have freed lsa (see
				 * comment above) -- possible use-after-free;
				 * consider saving the type beforehand. */
				if (lsa->hdr.type == htons(LSA_TYPE_NETWORK)) {
					struct lsa *intra;
					intra = orig_intra_lsa_net(nbr->area,
					    nbr->iface);
					if (intra)
						lsa_merge(nbr, intra, NULL);
				}

				break;
			}

			r = lsa_newer(&lsa->hdr, db_hdr);
			if (r > 0) {
				/* new LSA newer than DB */
				if (v && v->flooded &&
				    v->changed + MIN_LS_ARRIVAL >= now) {
					free(lsa);
					break;
				}

				rde_req_list_del(nbr, &lsa->hdr);

				if (!(self = lsa_self(nbr, lsa, v)))
					if (lsa_add(nbr, lsa))
						/* delayed lsa */
						break;

				/* flood and perhaps ack LSA */
				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
				    imsg.hdr.peerid, 0, lsa,
				    ntohs(lsa->hdr.len));

				/* reflood self originated LSA */
				if (self && v)
					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
					    v->peerid, 0, v->lsa,
					    ntohs(v->lsa->hdr.len));
				/* lsa not added so free it */
				if (self)
					free(lsa);
			} else if (r < 0) {
				/* lsa no longer needed */
				free(lsa);

				/*
				 * point 6 of "The Flooding Procedure"
				 * We are violating the RFC here because
				 * it does not make sense to reset a session
				 * because an equal LSA is already in the table.
				 * Only if the LSA sent is older than the one
				 * in the table we should reset the session.
				 */
				if (rde_req_list_exists(nbr, &lsa->hdr)) {
					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
					    imsg.hdr.peerid, 0, NULL, 0);
					break;
				}

				/* new LSA older than DB; r < 0 implies the
				 * DB copy exists, so db_hdr/v are non-NULL */
				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
				    ntohs(db_hdr->age) == MAX_AGE)
					/* seq-num wrap */
					break;

				if (v->changed + MIN_LS_ARRIVAL >= now)
					break;

				/* directly send current LSA, no ack */
				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
				    imsg.hdr.peerid, 0, v->lsa,
				    ntohs(v->lsa->hdr.len));
			} else {
				/* LSA equal send direct ack */
				imsg_compose(ibuf_ospfe, IMSG_LS_ACK,
				    imsg.hdr.peerid, 0, &lsa->hdr,
				    sizeof(lsa->hdr));
				free(lsa);
			}
			break;
		case IMSG_LS_MAXAGE:
			nbr = rde_nbr_find(imsg.hdr.peerid);
			if (nbr == NULL)
				break;

			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(struct lsa_hdr))
				fatalx("invalid size of OE request");
			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));

			/* can not flush LSAs while neighbors are loading */
			if (rde_nbr_loading(nbr->area))
				break;

			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
			    lsa_hdr.adv_rtr);
			if (v == NULL)
				db_hdr = NULL;
			else
				db_hdr = &v->lsa->hdr;

			/*
			 * only delete LSA if the one in the db is not newer
			 */
			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
				lsa_del(nbr, &lsa_hdr);
			break;
		case IMSG_CTL_SHOW_DATABASE:
		case IMSG_CTL_SHOW_DB_EXT:
		case IMSG_CTL_SHOW_DB_LINK:
		case IMSG_CTL_SHOW_DB_NET:
		case IMSG_CTL_SHOW_DB_RTR:
		case IMSG_CTL_SHOW_DB_INTRA:
		case IMSG_CTL_SHOW_DB_SELF:
		case IMSG_CTL_SHOW_DB_SUM:
		case IMSG_CTL_SHOW_DB_ASBR:
			/* optional payload: an area id to restrict the dump */
			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
				log_warnx("rde_dispatch_imsg: wrong imsg len");
				break;
			}
			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
				LIST_FOREACH(area, &rdeconf->area_list, entry) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
				}
				lsa_dump(&asext_tree, imsg.hdr.type,
				    imsg.hdr.pid);
			} else {
				memcpy(&aid, imsg.data, sizeof(aid));
				if ((area = area_find(rdeconf, aid)) != NULL) {
					rde_dump_area(area, imsg.hdr.type,
					    imsg.hdr.pid);
					if (!area->stub)
						lsa_dump(&asext_tree,
						    imsg.hdr.type,
						    imsg.hdr.pid);
				}
			}
			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			LIST_FOREACH(area, &rdeconf->area_list, entry) {
				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
				    0, imsg.hdr.pid, area, sizeof(*area));

				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
			}
			aid.s_addr = 0;
			rt_dump(aid, imsg.hdr.pid, RIB_EXT);

			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		case IMSG_CTL_SHOW_SUM:
			rde_send_summary(imsg.hdr.pid);
			LIST_FOREACH(area, &rdeconf->area_list, entry)
				rde_send_summary_area(area, imsg.hdr.pid);
			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);
			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}

/* ARGSUSED */
/*
 * Event handler for the pipe from the parent (main) process: kernel route
 * redistribution, interface add/delete and config reloads.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface, *iface;
	struct imsg		 imsg;
	struct kroute		 kr;
	struct rroute		 rr;
	struct imsgbuf		*ibuf = bula;
	struct lsa		*lsa;
	struct vertex		*v;
	struct rt_node		*rn;
	ssize_t			 n;
	int			 shut = 0;
	unsigned int		 ifindex;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			/* redistributed route appeared: originate AS-ext LSA */
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			if ((lsa = rde_asext_get(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_NETWORK_DEL:
			/* redistributed route gone: flush with MAX_AGE */
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			if ((lsa = rde_asext_put(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				/*
				 * if v == NULL no LSA is in the table and
				 * nothing has to be done.
				 */
				if (v)
					lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_KROUTE_GET:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
			    DT_NET)) != NULL)
				rde_send_change_kroute(rn);
			else
				/* should not happen */
				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
				    0, &kr, sizeof(kr));
			break;
		case IMSG_IFADD:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			RB_INIT(&niface->lsa_tree);

			/* NOTE(review): area_find() result is not checked;
			 * a NULL narea here would crash -- confirm ospfe
			 * guarantees the area exists before IMSG_IFADD. */
			narea = area_find(rdeconf, niface->area_id);
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
			break;
		case IMSG_IFDELETE:
			if (imsg.hdr.len != IMSG_HEADER_SIZE +
			    sizeof(ifindex))
				fatalx("IFINFO imsg with wrong len");

			memcpy(&ifindex, imsg.data, sizeof(ifindex));
			iface = if_find(ifindex);
			if (iface == NULL)
				fatalx("interface lost in ospfe");

			LIST_REMOVE(iface, entry);
			if_del(iface);
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_END:
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}

/* Dump one area's database (header, per-iface link LSAs, area LSAs)
 * to the ctl client identified by pid. */
void
rde_dump_area(struct area *area, int imsg_type, pid_t pid)
{
	struct iface	*iface;

	/* dump header */
	imsg_compose(ibuf_ospfe, IMSG_CTL_AREA, 0, pid, area, sizeof(*area));

	/* dump link local lsa */
	LIST_FOREACH(iface, &area->iface_list, entry) {
		imsg_compose(ibuf_ospfe, IMSG_CTL_IFACE,
		    0, pid, iface, sizeof(*iface));
		lsa_dump(&iface->lsa_tree, imsg_type, pid);
	}

	/* dump area lsa */
	lsa_dump(&area->lsa_tree, imsg_type, pid);
}

/* Return this router's router-id (network byte order s_addr). */
u_int32_t
rde_router_id(void)
{
	return (rdeconf->rtr_id.s_addr);
}

/* Push the first valid nexthop of rt_node r to the parent as a
 * kroute change; fatal if no valid nexthop exists. */
void
rde_send_change_kroute(struct rt_node *r)
{
	struct kroute		 kr;
	struct rt_nexthop	*rn;

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (!rn->invalid)
			break;
	}
	if (!rn)
		fatalx("rde_send_change_kroute: no valid nexthop found");

	bzero(&kr, sizeof(kr));
	kr.prefix = r->prefix;
	kr.nexthop = rn->nexthop;
	kr.prefixlen = r->prefixlen;
	kr.ext_tag = r->ext_tag;

	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
}

/* Ask the parent to delete the kernel route for rt_node r. */
void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix = r->prefix;
	kr.prefixlen = r->prefixlen;

	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
}

/* Send the global "show summary" counters to the ctl client. */
void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	RB_FOREACH(v, lsa_tree, &asext_tree)
		sumctl.num_ext_lsa++;

	gettimeofday(&now, NULL);
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}

/* Send per-area "show summary" counters to the ctl client. */
void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree)
		sumareactl.num_lsa++;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}

/* Hash table of RDE neighbors, keyed by the ospfe-assigned peerid. */
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;	/* size - 1, size is a power of two */
} rdenbrtable;

#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]

/* Allocate the neighbor hash table (rounded up to a power of two)
 * and create the special self-neighbor used for self-originated LSAs. */
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}

/* Release the self-neighbor and the hash table itself. */
void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}

/* Look up a neighbor by peerid; NULL if unknown. */
struct rde_nbr *
rde_nbr_find(u_int32_t peerid)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;

	head = RDE_NBR_HASH(peerid);

	LIST_FOREACH(nbr, head, hash) {
		if (nbr->peerid == peerid)
			return (nbr);
	}

	return (NULL);
}

/* Create a neighbor from the template *new, link it into the hash
 * table and its area's list.  Returns NULL if peerid already exists. */
struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;
	struct iface		*iface;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	LIST_FOREACH(iface, &area->iface_list, entry) {
		if (iface->ifindex == new->ifindex)
			break;
	}
	if (iface == NULL)
		fatalx("rde_nbr_new: unknown interface");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;
	nbr->iface = iface;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}

/* Unlink and free a neighbor; NULL is a no-op. */
void
rde_nbr_del(struct rde_nbr *nbr)
{
	if (nbr == NULL)
		return;

	rde_req_list_free(nbr);

	LIST_REMOVE(nbr, entry);
	LIST_REMOVE(nbr, hash);

	free(nbr);
}

/* Return 1 if any (non-self) neighbor in the area -- or in any area
 * when area is NULL -- is still in Exchange or Loading state. */
int
rde_nbr_loading(struct area *area)
{
	struct rde_nbr		*nbr;
	int			 checkall = 0;

	if (area == NULL) {
		area = LIST_FIRST(&rdeconf->area_list);
		checkall = 1;
	}

	while (area != NULL) {
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (nbr->self)
				continue;
			if (nbr->state & NBR_STA_XCHNG ||
			    nbr->state & NBR_STA_LOAD)
				return (1);
		}
		if (!checkall)
			break;
		area = LIST_NEXT(area, entry);
	}

	return (0);
}

/* Return the area's self-neighbor; fatal if the area has none. */
struct rde_nbr *
rde_nbr_self(struct area *area)
{
	struct rde_nbr		*nbr;

	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->self)
			return (nbr);

	/* this may not happen */
	fatalx("rde_nbr_self: area without self");
	return (NULL);
}

/*
 * LSA req list
 *
 * Tracks which LSAs we have requested from a neighbor during database
 * exchange (keyed by type/ls_id/adv_rtr triple).
 */
void
rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
{
	struct rde_req_entry	*le;

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("rde_req_list_add");

	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
	le->type = lsa->type;
	le->ls_id = lsa->ls_id;
	le->adv_rtr = lsa->adv_rtr;
}

/* Return 1 if the LSA identified by lsa_hdr is on the request list. */
int
rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr))
			return (1);
	}
	return (0);
}

/* Remove the matching entry from the request list, if present. */
void
rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct rde_req_entry	*le;

	TAILQ_FOREACH(le, &nbr->req_list, entry) {
		if ((lsa_hdr->type == le->type) &&
		    (lsa_hdr->ls_id == le->ls_id) &&
		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
			TAILQ_REMOVE(&nbr->req_list, le, entry);
			free(le);
			return;
		}
	}
}

/* Free the whole request list. */
void
rde_req_list_free(struct rde_nbr *nbr)
{
	struct rde_req_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
		TAILQ_REMOVE(&nbr->req_list, le, entry);
		free(le);
	}
}

/*
 * as-external LSA handling
 */
/* Build an AS-external LSA for a newly redistributed route. */
struct lsa *
rde_asext_get(struct rroute *rr)
{
#if 0
	struct area	*area;
	struct iface	*iface;
XXX
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
			    prefixlen2mask(rr->kr.prefixlen)) {
				/* already announced as (stub) net LSA */
				log_debug("rde_asext_get: %s/%d is net LSA",
				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
				return (NULL);
			}
		}
#endif
	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}

/* Build a MAX_AGE AS-external LSA to withdraw a redistributed route. */
struct lsa *
rde_asext_put(struct rroute *rr)
{
	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA lsa_find() will fail later and nothing will happen.
	 */

	/* remove by reflooding with MAX_AGE */
	return (orig_asext_lsa(rr, MAX_AGE));
}

/*
 * summary LSA stuff
 */
/* (Mostly unimplemented for OSPFv3 at this revision -- see the #if 0.) */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct vertex		*v = NULL;
//XXX	struct lsa		*lsa;
	u_int16_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* TODO nexthop check, nexthop part of area -> no summary */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	/* NOTE(review): the trailing `else` below has its body removed by
	 * the #if 0, so it binds to the following `if (v)` statement --
	 * i.e. `if (v)` only runs when d_type is neither DT_NET nor
	 * DT_RTR.  Looks unintended; confirm against later revisions. */
	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_INTER_A_PREFIX;
	} else if (rte->d_type == DT_RTR) {
		type = LSA_TYPE_INTER_A_ROUTER;
	} else

#if 0	/* XXX a lot todo */
	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
#endif

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}

/*
 * Functions for self-originated LSAs
 */

/*
 * Originate an intra-area-prefix LSA referencing the network LSA of the
 * given interface: collect the global-scope prefixes from all adjacent
 * neighbors' link LSAs on that link.  Returns NULL if there is nothing
 * to announce; caller owns the returned LSA.
 */
struct lsa *
orig_intra_lsa_net(struct area *area, struct iface *iface)
{
	struct lsa		*lsa;
	struct vertex		*v;
	struct rde_nbr		*nbr;
	struct prefix_tree	 tree;
	u_int16_t		 len;
	u_int16_t		 numprefix;

	log_debug("orig_intra_lsa_net: area %s", inet_ntoa(area->id));

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_intra_prefix);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_intra_lsa_net");

	lsa->data.pref_intra.ref_type = htons(LSA_TYPE_NETWORK);
	lsa->data.pref_intra.ref_lsid = htonl(iface->ifindex);
	lsa->data.pref_intra.ref_adv_rtr = rdeconf->rtr_id.s_addr;

	/* Build an RB tree of all global prefixes contained
	 * in this interface's link LSAs. This makes it easy
	 * to eliminate duplicates. */
	RB_INIT(&tree);
	RB_FOREACH(v, lsa_tree, &iface->lsa_tree) {
		if (v->lsa->hdr.type != htons(LSA_TYPE_LINK))
			continue;

		/* Make sure advertising router is adjacent... */
		LIST_FOREACH(nbr, &area->nbr_list, entry) {
			if (v->lsa->hdr.adv_rtr == nbr->id.s_addr)
				break;
		}
		/* NOTE(review): fatalx() does not return, so the free()
		 * and return below are unreachable dead code. */
		if (!nbr) {
			fatalx("orig_intra_lsa_net: cannot find neighbor");
			free(lsa);
			return (NULL);
		}
		if (nbr->state < NBR_STA_2_WAY)
			continue;

		/* ... and that the LSA's link state ID matches
		 * the neighbour's interface ID. */
		if (ntohl(v->lsa->hdr.ls_id) != nbr->iface_id)
			continue;

		prefix_tree_add_net(&tree, &v->lsa->data.link);
	}

	if (RB_EMPTY(&tree)) {
		free(lsa);
		return NULL;
	}

	append_prefix_lsas(&lsa, &len, &numprefix, &tree);
	if (lsa == NULL)
		fatalx("orig_intra_lsa_net: failed to append LSAs");

	lsa->data.pref_intra.numprefix = htons(numprefix);

	while (!RB_EMPTY(&tree))
		free(RB_REMOVE(prefix_tree, &tree, RB_ROOT(&tree)));

	/* LSA header */
	lsa->hdr.age = htons(DEFAULT_AGE);
	lsa->hdr.type = htons(LSA_TYPE_INTRA_A_PREFIX);
	lsa->hdr.ls_id = htonl(iface->ifindex);
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);
	lsa->hdr.ls_chksum = htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return lsa;
}

/* Prefix LSAs have variable size. We have to be careful to copy the right
 * amount of bytes, and to realloc() the right amount of memory. */
void
append_prefix_lsas(struct lsa **lsa, u_int16_t *len, u_int16_t *numprefix,
    struct prefix_tree *tree)
{
	struct prefix_node	*node;
	struct lsa_prefix	*copy;
	unsigned int		 lsa_prefix_len;
	unsigned int		 new_len;
	char			*new_lsa;

	*numprefix = 0;

	RB_FOREACH(node, prefix_tree, tree) {
		lsa_prefix_len = sizeof(struct lsa_prefix)
		    + LSA_PREFIXSIZE(node->prefix->prefixlen);

		new_len = *len + lsa_prefix_len;

		/* Make sure we have enough space for this prefix. */
		/* NOTE(review): fatalx() does not return, so the free/NULL
		 * cleanup below is unreachable dead code. */
		if ((new_lsa = realloc(*lsa, new_len)) == NULL) {
			fatalx("append_prefix_lsas");
			free(*lsa);
			*lsa = NULL;
			*len = 0;
			return;
		}

		/* Append prefix to LSA. */
		copy = (struct lsa_prefix *)(new_lsa + *len);
		memcpy(copy, node->prefix, lsa_prefix_len);
		copy->metric = 0;

		*lsa = (struct lsa *)new_lsa;
		*len = new_len;
		(*numprefix)++;
	}
}

/* RB-tree comparator: order prefixes by their address bytes, then by
 * prefix length, so duplicates collide on insert. */
int
prefix_compare(struct prefix_node *a, struct prefix_node *b)
{
	struct lsa_prefix	*p;
	struct lsa_prefix	*q;
	int			 i;
	int			 len;

	p = a->prefix;
	q = b->prefix;

	len = MIN(LSA_PREFIXSIZE(p->prefixlen), LSA_PREFIXSIZE(q->prefixlen));

	i = memcmp(p + 1, q + 1, len);
	if (i)
		return (i);
	if (p->prefixlen < q->prefixlen)
		return (-1);
	if (p->prefixlen > q->prefixlen)
		return (1);
	return (0);
}

/* Walk the prefixes of a link LSA and insert the usable global ones
 * (not link-local, not NU/LA) into the dedup tree; on a duplicate the
 * options bits are OR-ed together. */
void
prefix_tree_add_net(struct prefix_tree *tree, struct lsa_link *lsa)
{
	struct prefix_node	*old;
	struct prefix_node	*new;
	struct in6_addr		 addr;
	unsigned int		 len;
	unsigned int		 i;
	char			*cur_prefix;

	cur_prefix = (char *)(lsa + 1);

	for (i = 0; i < ntohl(lsa->numprefix); i++) {
		/* NOTE(review): calloc() result is not checked; every
		 * other allocation in this file fatal()s on failure. */
		new = calloc(sizeof(*new), 1);
		new->prefix = (struct lsa_prefix *)cur_prefix;

		len = sizeof(*new->prefix)
		    + LSA_PREFIXSIZE(new->prefix->prefixlen);

		bzero(&addr, sizeof(addr));
		memcpy(&addr, new->prefix + 1,
		    LSA_PREFIXSIZE(new->prefix->prefixlen));

		if (!(IN6_IS_ADDR_LINKLOCAL(&addr))
		    && (new->prefix->options & OSPF_PREFIX_NU) == 0
		    && (new->prefix->options & OSPF_PREFIX_LA) == 0) {
			old = RB_INSERT(prefix_tree, tree, new);
			if (old != NULL) {
				old->prefix->options |= new->prefix->options;
				free(new);
			}
		}

		cur_prefix = cur_prefix + len;
	}
}

RB_GENERATE(prefix_tree, prefix_node, entry, prefix_compare)

struct lsa
* 1338orig_asext_lsa(struct rroute *rr, u_int16_t age) 1339{ 1340#if 0 /* XXX a lot todo */ 1341 struct lsa *lsa; 1342 u_int16_t len; 1343 1344 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext); 1345 if ((lsa = calloc(1, len)) == NULL) 1346 fatal("orig_asext_lsa"); 1347 1348 log_debug("orig_asext_lsa: %s/%d age %d", 1349 log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age); 1350 1351 /* LSA header */ 1352 lsa->hdr.age = htons(age); 1353 lsa->hdr.type = LSA_TYPE_EXTERNAL; 1354 lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr; 1355 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); 1356 lsa->hdr.len = htons(len); 1357 1358 /* prefix and mask */ 1359 /* 1360 * TODO ls_id must be unique, for overlapping routes this may 1361 * not be true. In this case a hack needs to be done to 1362 * make the ls_id unique. 1363 */ 1364 lsa->hdr.ls_id = rr->kr.prefix.s_addr; 1365 lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen); 1366 1367 /* 1368 * nexthop -- on connected routes we are the nexthop, 1369 * on all other cases we announce the true nexthop. 1370 * XXX this is wrong as the true nexthop may be outside 1371 * of the ospf cloud and so unreachable. For now we force 1372 * all traffic to be directed to us. 1373 */ 1374 lsa->data.asext.fw_addr = 0; 1375 1376 lsa->data.asext.metric = htonl(rr->metric); 1377 lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag); 1378 1379 lsa->hdr.ls_chksum = 0; 1380 lsa->hdr.ls_chksum = 1381 htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); 1382 1383 return (lsa); 1384#endif 1385 return NULL; 1386} 1387 1388struct lsa * 1389orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid) 1390{ 1391#if 0 /* XXX a lot todo */ 1392 struct lsa *lsa; 1393 u_int16_t len; 1394 1395 len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum); 1396 if ((lsa = calloc(1, len)) == NULL) 1397 fatal("orig_sum_lsa"); 1398 1399 /* LSA header */ 1400 lsa->hdr.age = htons(invalid ? 
MAX_AGE : DEFAULT_AGE); 1401 lsa->hdr.type = type; 1402 lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr; 1403 lsa->hdr.seq_num = htonl(INIT_SEQ_NUM); 1404 lsa->hdr.len = htons(len); 1405 1406 /* prefix and mask */ 1407 /* 1408 * TODO ls_id must be unique, for overlapping routes this may 1409 * not be true. In this case a hack needs to be done to 1410 * make the ls_id unique. 1411 */ 1412 lsa->hdr.ls_id = rte->prefix.s_addr; 1413 if (type == LSA_TYPE_SUM_NETWORK) 1414 lsa->data.sum.mask = prefixlen2mask(rte->prefixlen); 1415 else 1416 lsa->data.sum.mask = 0; /* must be zero per RFC */ 1417 1418 lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK); 1419 1420 lsa->hdr.ls_chksum = 0; 1421 lsa->hdr.ls_chksum = 1422 htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET)); 1423 1424 return (lsa); 1425#endif 1426 return NULL; 1427} 1428