/* rde.c revision 1.3 */
/*	$OpenBSD: rde.c,v 1.3 2007/01/08 13:01:10 claudio Exp $ */

/*
 * Copyright (c) 2006 Michele Marchetto <mydecay@openbeer.it>
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ripd.h"
#include "rip.h"
#include "ripe.h"
#include "rde.h"
#include "log.h"

/* running configuration, handed over by the parent in rde() */
struct ripd_conf	*rdeconf = NULL;
/* imsg channels: ibuf_ripe talks to the rip engine, ibuf_main to the parent */
struct imsgbuf		*ibuf_ripe;
struct imsgbuf		*ibuf_main;

void	 rde_sig_handler(int, short, void *);
void	 rde_shutdown(void);
void	 rde_dispatch_imsg(int, short, void *);
void	 rde_dispatch_parent(int, short, void *);
int	 rde_imsg_compose_parent(int, pid_t, void *, u_int16_t);
int	 rde_imsg_compose_ripe(int, u_int32_t, pid_t, void *, u_int16_t);
int	 rde_check_route(struct rip_route *);
void	 triggered_update(struct rt_node *);

/*
 * Terminate the process on SIGINT/SIGTERM; any other signal delivered
 * here is a programming error.
 */
/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/*
 * route decision engine
 *
 * Forks the RDE child process: chroots to RIPD_USER's home, drops
 * privileges, wires up the imsg pipes to the parent and the rip engine,
 * initializes the routing table and enters the libevent loop.  Returns
 * the child's pid to the parent; the child itself never returns.
 */
pid_t
rde(struct ripd_conf *xconf, int pipe_parent2rde[2], int pipe_ripe2rde[2],
    int pipe_parent2ripe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		/* parent: hand the child's pid back */
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(RIPD_USER)) == NULL)
		fatal("getpwnam");

	/* jail the process before dropping privileges */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ripd_process = PROC_RDE_ENGINE;

	/* drop to the unprivileged ripd user; order matters: groups first */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);

	/* setup pipes: close the ends this process does not use */
	close(pipe_ripe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ripe[0]);
	close(pipe_parent2ripe[1]);

	if ((ibuf_ripe = malloc(sizeof(struct imsgbuf))) == NULL ||
	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_ripe, pipe_ripe2rde[1], rde_dispatch_imsg);
	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);

	/* setup event handler */
	ibuf_ripe->events = EV_READ;
	event_set(&ibuf_ripe->ev, ibuf_ripe->fd, ibuf_ripe->events,
	    ibuf_ripe->handler, ibuf_ripe);
	event_add(&ibuf_ripe->ev, NULL);

	ibuf_main->events = EV_READ;
	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
	    ibuf_main->handler, ibuf_main);
	event_add(&ibuf_main->ev, NULL);

	rt_init();

	/* remove unneeded config stuff: the redistribute list is only
	 * evaluated in the parent process */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	event_dispatch();

	/* event loop only exits on error/shutdown */
	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

/*
 * Release the routing table and imsg buffers, then terminate the
 * process.  Called from the signal handler and after event_dispatch()
 * returns; never returns.
 */
void
rde_shutdown(void)
{
	rt_clear();

	msgbuf_clear(&ibuf_ripe->w);
	free(ibuf_ripe);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

/* imesg */
/* Convenience wrapper: send an imsg to the parent process. */
int
rde_imsg_compose_parent(int type, pid_t pid, void *data, u_int16_t datalen)
{
	return (imsg_compose(ibuf_main, type, 0, pid, data, datalen));
}

/* Convenience wrapper: send an imsg to the rip engine process. */
int
rde_imsg_compose_ripe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose(ibuf_ripe, type, peerid, pid, data, datalen));
}

/*
 * libevent callback for the pipe to the rip engine: drain all complete
 * imsgs and act on them (route feeds, full/partial request handling,
 * RIB dumps for ripctl).
 */
/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgbuf		*ibuf = bula;
	struct rip_route	 rr;
	struct imsg		 imsg;
	/* NOTE(review): `n` is int here but ssize_t in rde_dispatch_parent;
	 * imsg_read()/imsg_get() return ssize_t — confirm no truncation */
	int			 n;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			fatalx("pipe closed");
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	/* process every complete message currently buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ROUTE_FEED:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
				fatalx("invalid size of RDE request");

			memcpy(&rr, imsg.data, sizeof(rr));

			if (rde_check_route(&rr) == -1)
				log_debug("rde_dispatch_imsg: "
				    "packet malformed\n");
			break;
		case IMSG_FULL_REQUEST:
			bzero(&rr, sizeof(rr));
			/* AFI == 0 && metric == INFINITY request the
			   whole routing table
			 */
			rr.metric = INFINITY;
			rde_imsg_compose_ripe(IMSG_REQUEST_ADD, 0,
			    0, &rr, sizeof(rr));
			rde_imsg_compose_ripe(IMSG_SEND_REQUEST, 0,
			    0, NULL, 0);
			break;
		case IMSG_FULL_RESPONSE:
			/* queue a snapshot of the table for this peer,
			 * then tell the engine to send it */
			rt_snap(imsg.hdr.peerid);
			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
			    imsg.hdr.peerid, 0, NULL, 0);
			break;
		case IMSG_ROUTE_REQUEST:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
				fatalx("invalid size of RDE request");

			memcpy(&rr, imsg.data, sizeof(rr));

			/* fill in metric/nexthop for the requested prefix */
			rt_complete(&rr);
			rde_imsg_compose_ripe(IMSG_RESPONSE_ADD,
			    imsg.hdr.peerid, 0, &rr, sizeof(rr));

			break;
		case IMSG_ROUTE_REQUEST_END:
			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
			    imsg.hdr.peerid, 0, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			/* dump goes to the requesting ripctl via its pid */
			rt_dump(imsg.hdr.pid);

			imsg_compose(ibuf_ripe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);

			break;
		default:
			log_debug("rde_dispatch_msg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	imsg_event_add(ibuf);
}

/*
 * libevent callback for the pipe to the parent: handle kernel-route
 * bookkeeping (connected networks added/removed, kroute lookups).
 */
/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	struct imsg		 imsg;
	struct rt_node		*rt;
	struct kroute		 kr;
	struct imsgbuf		*ibuf = bula;
	ssize_t			 n;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			fatalx("pipe closed");
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	/* process every complete message currently buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}

			memcpy(&kr, imsg.data, sizeof(kr));

			rt = rt_new_kr(&kr);
			rt_insert(rt);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rt = rt_find(kr.prefix.s_addr,
			    kr.netmask.s_addr)) != NULL)
				rt_remove(rt);
			break;
		case IMSG_KROUTE_GET:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rt = rt_find(kr.prefix.s_addr,
			    kr.netmask.s_addr)) != NULL)
				rde_send_change_kroute(rt);
			else
				/* should not happen */
				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
				    0, &kr, sizeof(kr));

			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	imsg_event_add(ibuf);
}

/*
 * Ask the parent to install/update the kernel route matching rt_node r.
 */
void
rde_send_change_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.nexthop.s_addr = r->nexthop.s_addr;
	kr.netmask.s_addr = r->netmask.s_addr;
	kr.metric = r->metric;
	kr.flags = r->flags;
	kr.ifindex = r->ifindex;

	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
}

/*
 * Ask the parent to remove the kernel route matching rt_node r.
 */
void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.nexthop.s_addr = r->nexthop.s_addr;
	kr.netmask.s_addr = r->netmask.s_addr;
	kr.metric = r->metric;
	kr.flags = r->flags;
	kr.ifindex = r->ifindex;

	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
}

/*
 * Validate a received route entry and merge it into the routing table,
 * applying the RIP distance-vector rules: add cost of the receiving
 * interface, ignore routes at/over INFINITY, prefer lower metrics and
 * refresh timers when the current next hop re-announces.
 * Returns -1 for malformed entries, 0 otherwise.
 */
int
rde_check_route(struct rip_route *e)
{
	struct rt_node	*rn;
	struct iface	*iface;
	int		 metric;

	/* reject loopback or unspecified next hops */
	if ((e->nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET) ||
	    (e->nexthop.s_addr == INADDR_ANY))
		return (-1);

	/* reject destinations in the loopback net */
	if ((e->address.s_addr & htonl(IN_CLASSA_NET)) ==
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET))
		return (-1);

	if (e->metric > INFINITY)
		return (-1);

	if ((iface = if_find_index(e->ifindex)) == NULL)
		return (-1);

	/* add the cost of the incoming interface, capped at INFINITY */
	metric = MIN(INFINITY, e->metric + iface->cost);
	if (metric >= INFINITY)
		return (0);

	if ((rn = rt_find(e->address.s_addr, e->mask.s_addr)) == NULL) {
		/* previously unknown destination: install and announce */
		rn = rt_new_rr(e, metric);
		rt_insert(rn);
		rde_send_change_kroute(rn);
		route_start_timeout(rn);
		triggered_update(rn);
	} else {
		/*
		 * XXX don't we have to track all incoming routes?
		 * what happens if the kernel route is removed later.
		 */
		if (rn->flags & F_KERNEL)
			return (0);

		if (metric < rn->metric) {
			/* better path: adopt new metric and next hop */
			rn->metric = metric;
			rn->nexthop.s_addr = e->nexthop.s_addr;
			rn->ifindex = e->ifindex;
			rde_send_change_kroute(rn);
			triggered_update(rn);
		} else if (e->nexthop.s_addr == rn->nexthop.s_addr &&
		    e->metric > metric) {
			/* current next hop reports a changed metric:
			 * believe it even if worse */
			rn->metric = metric;
			rde_send_change_kroute(rn);
			triggered_update(rn);
		}

		/* any announcement from the current next hop keeps the
		 * route alive */
		if (e->nexthop.s_addr == rn->nexthop.s_addr)
			route_reset_timers(rn);
	}

	return (0);
}

/*
 * Hand a changed route to the rip engine for an immediate (triggered)
 * update, instead of waiting for the next periodic response.
 */
void
triggered_update(struct rt_node *rn)
{
	struct rip_route	 rr;

	rr.address.s_addr = rn->prefix.s_addr;
	rr.mask.s_addr = rn->netmask.s_addr;
	rr.nexthop.s_addr = rn->nexthop.s_addr;
	rr.metric = rn->metric;
	rr.ifindex = rn->ifindex;

	rde_imsg_compose_ripe(IMSG_SEND_TRIGGERED_UPDATE, 0, 0, &rr,
	    sizeof(struct rip_route));
}