/* if_pfsync.c revision 228811 */
1/* $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ */ 2 3/* 4 * Copyright (c) 2002 Michael Shalayeff 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 26 * THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* 30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org> 31 * 32 * Permission to use, copy, modify, and distribute this software for any 33 * purpose with or without fee is hereby granted, provided that the above 34 * copyright notice and this permission notice appear in all copies. 35 * 36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 38 * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 43 */ 44 45#ifdef __FreeBSD__ 46#include "opt_inet.h" 47#include "opt_inet6.h" 48#include "opt_pf.h" 49 50#include <sys/cdefs.h> 51__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 228811 2011-12-22 18:31:47Z glebius $"); 52 53#define NBPFILTER 1 54 55#ifdef DEV_PFSYNC 56#define NPFSYNC DEV_PFSYNC 57#else 58#define NPFSYNC 0 59#endif 60#endif /* __FreeBSD__ */ 61 62#include <sys/param.h> 63#include <sys/kernel.h> 64#ifdef __FreeBSD__ 65#include <sys/bus.h> 66#include <sys/interrupt.h> 67#include <sys/priv.h> 68#endif 69#include <sys/proc.h> 70#include <sys/systm.h> 71#include <sys/time.h> 72#include <sys/mbuf.h> 73#include <sys/socket.h> 74#ifdef __FreeBSD__ 75#include <sys/endian.h> 76#include <sys/malloc.h> 77#include <sys/module.h> 78#include <sys/sockio.h> 79#include <sys/taskqueue.h> 80#include <sys/lock.h> 81#include <sys/mutex.h> 82#else 83#include <sys/ioctl.h> 84#include <sys/timeout.h> 85#endif 86#include <sys/sysctl.h> 87#ifndef __FreeBSD__ 88#include <sys/pool.h> 89#endif 90 91#include <net/if.h> 92#ifdef __FreeBSD__ 93#include <net/if_clone.h> 94#endif 95#include <net/if_types.h> 96#include <net/route.h> 97#include <net/bpf.h> 98#include <net/netisr.h> 99#ifdef __FreeBSD__ 100#include <net/vnet.h> 101#endif 102 103#include <netinet/in.h> 104#include <netinet/if_ether.h> 105#include <netinet/tcp.h> 106#include <netinet/tcp_seq.h> 107 108#ifdef INET 109#include <netinet/in_systm.h> 110#include <netinet/in_var.h> 111#include <netinet/ip.h> 112#include <netinet/ip_var.h> 113#endif 114 115#ifdef INET6 116#include <netinet6/nd6.h> 117#endif /* INET6 */ 118 119#ifdef __FreeBSD__ 120#include <netinet/ip_carp.h> 121#else 
122#include "carp.h" 123#if NCARP > 0 124#include <netinet/ip_carp.h> 125#endif 126#endif 127 128#include <net/pfvar.h> 129#include <net/if_pfsync.h> 130 131#ifndef __FreeBSD__ 132#include "bpfilter.h" 133#include "pfsync.h" 134#endif 135 136#define PFSYNC_MINPKT ( \ 137 sizeof(struct ip) + \ 138 sizeof(struct pfsync_header) + \ 139 sizeof(struct pfsync_subheader) + \ 140 sizeof(struct pfsync_eof)) 141 142struct pfsync_pkt { 143 struct ip *ip; 144 struct in_addr src; 145 u_int8_t flags; 146}; 147 148int pfsync_input_hmac(struct mbuf *, int); 149 150int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *, 151 struct pfsync_state_peer *); 152 153int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int); 154int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int); 155int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int); 156int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int); 157int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int); 158int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int); 159int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int); 160int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int); 161int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int); 162int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int); 163int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int); 164 165int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int); 166 167int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = { 168 pfsync_in_clr, /* PFSYNC_ACT_CLR */ 169 pfsync_in_ins, /* PFSYNC_ACT_INS */ 170 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ 171 pfsync_in_upd, /* PFSYNC_ACT_UPD */ 172 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */ 173 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */ 174 pfsync_in_del, /* PFSYNC_ACT_DEL */ 175 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */ 176 pfsync_in_error, /* PFSYNC_ACT_INS_F */ 177 pfsync_in_error, /* PFSYNC_ACT_DEL_F */ 178 
pfsync_in_bus, /* PFSYNC_ACT_BUS */ 179 pfsync_in_tdb, /* PFSYNC_ACT_TDB */ 180 pfsync_in_eof /* PFSYNC_ACT_EOF */ 181}; 182 183struct pfsync_q { 184 int (*write)(struct pf_state *, struct mbuf *, int); 185 size_t len; 186 u_int8_t action; 187}; 188 189/* we have one of these for every PFSYNC_S_ */ 190int pfsync_out_state(struct pf_state *, struct mbuf *, int); 191int pfsync_out_iack(struct pf_state *, struct mbuf *, int); 192int pfsync_out_upd_c(struct pf_state *, struct mbuf *, int); 193int pfsync_out_del(struct pf_state *, struct mbuf *, int); 194 195struct pfsync_q pfsync_qs[] = { 196 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS }, 197 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK }, 198 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD }, 199 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C }, 200 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C } 201}; 202 203void pfsync_q_ins(struct pf_state *, int); 204void pfsync_q_del(struct pf_state *); 205 206struct pfsync_upd_req_item { 207 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry; 208 struct pfsync_upd_req ur_msg; 209}; 210TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item); 211 212struct pfsync_deferral { 213 TAILQ_ENTRY(pfsync_deferral) pd_entry; 214 struct pf_state *pd_st; 215 struct mbuf *pd_m; 216#ifdef __FreeBSD__ 217 struct callout pd_tmo; 218#else 219 struct timeout pd_tmo; 220#endif 221}; 222TAILQ_HEAD(pfsync_deferrals, pfsync_deferral); 223 224#define PFSYNC_PLSIZE MAX(sizeof(struct pfsync_upd_req_item), \ 225 sizeof(struct pfsync_deferral)) 226 227#ifdef notyet 228int pfsync_out_tdb(struct tdb *, struct mbuf *, int); 229#endif 230 231struct pfsync_softc { 232#ifdef __FreeBSD__ 233 struct ifnet *sc_ifp; 234#else 235 struct ifnet sc_if; 236#endif 237 struct ifnet *sc_sync_if; 238 239#ifdef __FreeBSD__ 240 uma_zone_t sc_pool; 241#else 242 struct pool sc_pool; 243#endif 244 245 struct ip_moptions sc_imo; 246 247 struct in_addr 
sc_sync_peer; 248 u_int8_t sc_maxupdates; 249#ifdef __FreeBSD__ 250 int pfsync_sync_ok; 251#endif 252 253 struct ip sc_template; 254 255 struct pf_state_queue sc_qs[PFSYNC_S_COUNT]; 256 size_t sc_len; 257 258 struct pfsync_upd_reqs sc_upd_req_list; 259 260 struct pfsync_deferrals sc_deferrals; 261 u_int sc_deferred; 262 263 void *sc_plus; 264 size_t sc_pluslen; 265 266 u_int32_t sc_ureq_sent; 267 int sc_bulk_tries; 268#ifdef __FreeBSD__ 269 struct callout sc_bulkfail_tmo; 270#else 271 struct timeout sc_bulkfail_tmo; 272#endif 273 274 u_int32_t sc_ureq_received; 275 struct pf_state *sc_bulk_next; 276 struct pf_state *sc_bulk_last; 277#ifdef __FreeBSD__ 278 struct callout sc_bulk_tmo; 279#else 280 struct timeout sc_bulk_tmo; 281#endif 282 283 TAILQ_HEAD(, tdb) sc_tdb_q; 284 285#ifdef __FreeBSD__ 286 struct callout sc_tmo; 287#else 288 struct timeout sc_tmo; 289#endif 290#ifdef __FreeBSD__ 291 eventhandler_tag sc_detachtag; 292#endif 293 294}; 295 296#ifdef __FreeBSD__ 297static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL; 298#define V_pfsyncif VNET(pfsyncif) 299 300static VNET_DEFINE(struct pfsyncstats, pfsyncstats); 301#define V_pfsyncstats VNET(pfsyncstats) 302static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW; 303#define V_pfsync_carp_adj VNET(pfsync_carp_adj) 304 305SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC"); 306SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW, 307 &VNET_NAME(pfsyncstats), pfsyncstats, 308 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)"); 309SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW, 310 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment"); 311#else 312struct pfsync_softc *pfsyncif = NULL; 313struct pfsyncstats pfsyncstats; 314#define V_pfsyncstats pfsyncstats 315#endif 316 317#ifdef __FreeBSD__ 318static void pfsyncintr(void *); 319struct pfsync_swi { 320 void * pfsync_swi_cookie; 321}; 322static struct pfsync_swi pfsync_swi; 323#define 
schednetisr(p) swi_sched(pfsync_swi.pfsync_swi_cookie, 0) 324#define NETISR_PFSYNC 325#endif 326 327void pfsyncattach(int); 328#ifdef __FreeBSD__ 329int pfsync_clone_create(struct if_clone *, int, caddr_t); 330void pfsync_clone_destroy(struct ifnet *); 331#else 332int pfsync_clone_create(struct if_clone *, int); 333int pfsync_clone_destroy(struct ifnet *); 334#endif 335int pfsync_alloc_scrub_memory(struct pfsync_state_peer *, 336 struct pf_state_peer *); 337void pfsync_update_net_tdb(struct pfsync_tdb *); 338int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *, 339#ifdef __FreeBSD__ 340 struct route *); 341#else 342 struct rtentry *); 343#endif 344int pfsyncioctl(struct ifnet *, u_long, caddr_t); 345void pfsyncstart(struct ifnet *); 346 347struct mbuf *pfsync_if_dequeue(struct ifnet *); 348struct mbuf *pfsync_get_mbuf(struct pfsync_softc *); 349 350void pfsync_deferred(struct pf_state *, int); 351void pfsync_undefer(struct pfsync_deferral *, int); 352void pfsync_defer_tmo(void *); 353 354void pfsync_request_update(u_int32_t, u_int64_t); 355void pfsync_update_state_req(struct pf_state *); 356 357void pfsync_drop(struct pfsync_softc *); 358void pfsync_sendout(void); 359void pfsync_send_plus(void *, size_t); 360int pfsync_tdb_sendout(struct pfsync_softc *); 361int pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *); 362void pfsync_timeout(void *); 363void pfsync_tdb_timeout(void *); 364void pfsync_send_bus(struct pfsync_softc *, u_int8_t); 365 366void pfsync_bulk_start(void); 367void pfsync_bulk_status(u_int8_t); 368void pfsync_bulk_update(void *); 369void pfsync_bulk_fail(void *); 370 371#ifdef __FreeBSD__ 372void pfsync_ifdetach(void *, struct ifnet *); 373 374/* XXX: ugly */ 375#define betoh64 (unsigned long long)be64toh 376#define timeout_del callout_stop 377#endif 378 379#define PFSYNC_MAX_BULKTRIES 12 380#ifndef __FreeBSD__ 381int pfsync_sync_ok; 382#endif 383 384#ifdef __FreeBSD__ 385IFC_SIMPLE_DECLARE(pfsync, 1); 386#else 387struct if_clone 
pfsync_cloner = 388 IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy); 389#endif 390 391void 392pfsyncattach(int npfsync) 393{ 394 if_clone_attach(&pfsync_cloner); 395} 396int 397#ifdef __FreeBSD__ 398pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param) 399#else 400pfsync_clone_create(struct if_clone *ifc, int unit) 401#endif 402{ 403 struct pfsync_softc *sc; 404 struct ifnet *ifp; 405 int q; 406 407 if (unit != 0) 408 return (EINVAL); 409 410#ifndef __FreeBSD__ 411 pfsync_sync_ok = 1; 412#endif 413 414 sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO); 415 if (sc == NULL) 416 return (ENOMEM); 417 418 for (q = 0; q < PFSYNC_S_COUNT; q++) 419 TAILQ_INIT(&sc->sc_qs[q]); 420 421#ifdef __FreeBSD__ 422 sc->pfsync_sync_ok = 1; 423 sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE, 424 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 425 if (sc->sc_pool == NULL) { 426 free(sc, M_DEVBUF); 427 return (ENOMEM); 428 } 429#else 430 pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL); 431#endif 432 TAILQ_INIT(&sc->sc_upd_req_list); 433 TAILQ_INIT(&sc->sc_deferrals); 434 sc->sc_deferred = 0; 435 436 TAILQ_INIT(&sc->sc_tdb_q); 437 438 sc->sc_len = PFSYNC_MINPKT; 439 sc->sc_maxupdates = 128; 440 441#ifdef __FreeBSD__ 442 sc->sc_imo.imo_membership = (struct in_multi **)malloc( 443 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF, 444 M_NOWAIT | M_ZERO); 445 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS; 446 sc->sc_imo.imo_multicast_vif = -1; 447#else 448 sc->sc_imo.imo_membership = (struct in_multi **)malloc( 449 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS, 450 M_WAITOK | M_ZERO); 451 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS; 452#endif 453 454#ifdef __FreeBSD__ 455 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC); 456 if (ifp == NULL) { 457 free(sc->sc_imo.imo_membership, M_DEVBUF); 458 uma_zdestroy(sc->sc_pool); 459 free(sc, M_DEVBUF); 460 return (ENOSPC); 461 } 462 if_initname(ifp, 
ifc->ifc_name, unit); 463 464 sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event, 465#ifdef __FreeBSD__ 466 pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY); 467#else 468 pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY); 469#endif 470 if (sc->sc_detachtag == NULL) { 471 if_free(ifp); 472 free(sc->sc_imo.imo_membership, M_DEVBUF); 473 uma_zdestroy(sc->sc_pool); 474 free(sc, M_DEVBUF); 475 return (ENOSPC); 476 } 477#else 478 ifp = &sc->sc_if; 479 snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit); 480#endif 481 ifp->if_softc = sc; 482 ifp->if_ioctl = pfsyncioctl; 483 ifp->if_output = pfsyncoutput; 484 ifp->if_start = pfsyncstart; 485 ifp->if_type = IFT_PFSYNC; 486 ifp->if_snd.ifq_maxlen = ifqmaxlen; 487 ifp->if_hdrlen = sizeof(struct pfsync_header); 488 ifp->if_mtu = 1500; /* XXX */ 489#ifdef __FreeBSD__ 490 callout_init(&sc->sc_tmo, CALLOUT_MPSAFE); 491 callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0); 492 callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE); 493#else 494 ifp->if_hardmtu = MCLBYTES; /* XXX */ 495 timeout_set(&sc->sc_tmo, pfsync_timeout, sc); 496 timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc); 497 timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc); 498#endif 499 500 if_attach(ifp); 501#ifndef __FreeBSD__ 502 if_alloc_sadl(ifp); 503 504#if NCARP > 0 505 if_addgroup(ifp, "carp"); 506#endif 507#endif 508 509#if NBPFILTER > 0 510#ifdef __FreeBSD__ 511 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 512#else 513 bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 514#endif 515#endif 516 517#ifdef __FreeBSD__ 518 V_pfsyncif = sc; 519#else 520 pfsyncif = sc; 521#endif 522 523 return (0); 524} 525 526#ifdef __FreeBSD__ 527void 528#else 529int 530#endif 531pfsync_clone_destroy(struct ifnet *ifp) 532{ 533 struct pfsync_softc *sc = ifp->if_softc; 534 535#ifdef __FreeBSD__ 536 EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag); 537 PF_LOCK(); 538#endif 539 timeout_del(&sc->sc_bulk_tmo); 540 
timeout_del(&sc->sc_tmo); 541#ifdef __FreeBSD__ 542 PF_UNLOCK(); 543 if (!sc->pfsync_sync_ok && carp_demote_adj_p) 544 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy"); 545#else 546#if NCARP > 0 547 if (!pfsync_sync_ok) 548 carp_group_demote_adj(&sc->sc_if, -1); 549#endif 550#endif 551#if NBPFILTER > 0 552 bpfdetach(ifp); 553#endif 554 if_detach(ifp); 555 556 pfsync_drop(sc); 557 558 while (sc->sc_deferred > 0) 559 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0); 560 561#ifdef __FreeBSD__ 562 UMA_DESTROY(sc->sc_pool); 563#else 564 pool_destroy(&sc->sc_pool); 565#endif 566#ifdef __FreeBSD__ 567 if_free(ifp); 568 free(sc->sc_imo.imo_membership, M_DEVBUF); 569#else 570 free(sc->sc_imo.imo_membership, M_IPMOPTS); 571#endif 572 free(sc, M_DEVBUF); 573 574#ifdef __FreeBSD__ 575 V_pfsyncif = NULL; 576#else 577 pfsyncif = NULL; 578#endif 579 580#ifndef __FreeBSD__ 581 return (0); 582#endif 583} 584 585struct mbuf * 586pfsync_if_dequeue(struct ifnet *ifp) 587{ 588 struct mbuf *m; 589#ifndef __FreeBSD__ 590 int s; 591#endif 592 593#ifdef __FreeBSD__ 594 IF_LOCK(&ifp->if_snd); 595 _IF_DROP(&ifp->if_snd); 596 _IF_DEQUEUE(&ifp->if_snd, m); 597 IF_UNLOCK(&ifp->if_snd); 598#else 599 s = splnet(); 600 IF_DEQUEUE(&ifp->if_snd, m); 601 splx(s); 602#endif 603 604 return (m); 605} 606 607/* 608 * Start output on the pfsync interface. 
609 */ 610void 611pfsyncstart(struct ifnet *ifp) 612{ 613 struct mbuf *m; 614 615 while ((m = pfsync_if_dequeue(ifp)) != NULL) { 616#ifndef __FreeBSD__ 617 IF_DROP(&ifp->if_snd); 618#endif 619 m_freem(m); 620 } 621} 622 623int 624pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, 625 struct pf_state_peer *d) 626{ 627 if (s->scrub.scrub_flag && d->scrub == NULL) { 628#ifdef __FreeBSD__ 629 d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO); 630#else 631 d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO); 632#endif 633 if (d->scrub == NULL) 634 return (ENOMEM); 635 } 636 637 return (0); 638} 639 640#ifndef __FreeBSD__ 641void 642pfsync_state_export(struct pfsync_state *sp, struct pf_state *st) 643{ 644 bzero(sp, sizeof(struct pfsync_state)); 645 646 /* copy from state key */ 647 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 648 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 649 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 650 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 651 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 652 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 653 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 654 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 655 sp->proto = st->key[PF_SK_WIRE]->proto; 656 sp->af = st->key[PF_SK_WIRE]->af; 657 658 /* copy from state */ 659 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 660 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 661 sp->creation = htonl(time_second - st->creation); 662 sp->expire = pf_state_expires(st); 663 if (sp->expire <= time_second) 664 sp->expire = htonl(0); 665 else 666 sp->expire = htonl(sp->expire - time_second); 667 668 sp->direction = st->direction; 669 sp->log = st->log; 670 sp->timeout = st->timeout; 671 sp->state_flags = st->state_flags; 672 if (st->src_node) 673 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 674 if 
(st->nat_src_node) 675 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 676 677 bcopy(&st->id, &sp->id, sizeof(sp->id)); 678 sp->creatorid = st->creatorid; 679 pf_state_peer_hton(&st->src, &sp->src); 680 pf_state_peer_hton(&st->dst, &sp->dst); 681 682 if (st->rule.ptr == NULL) 683 sp->rule = htonl(-1); 684 else 685 sp->rule = htonl(st->rule.ptr->nr); 686 if (st->anchor.ptr == NULL) 687 sp->anchor = htonl(-1); 688 else 689 sp->anchor = htonl(st->anchor.ptr->nr); 690 if (st->nat_rule.ptr == NULL) 691 sp->nat_rule = htonl(-1); 692 else 693 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 694 695 pf_state_counter_hton(st->packets[0], sp->packets[0]); 696 pf_state_counter_hton(st->packets[1], sp->packets[1]); 697 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 698 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 699 700} 701#endif 702 703int 704pfsync_state_import(struct pfsync_state *sp, u_int8_t flags) 705{ 706 struct pf_state *st = NULL; 707 struct pf_state_key *skw = NULL, *sks = NULL; 708 struct pf_rule *r = NULL; 709 struct pfi_kif *kif; 710 int pool_flags; 711 int error; 712 713 PF_LOCK_ASSERT(); 714 715#ifdef __FreeBSD__ 716 if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) { 717#else 718 if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) { 719#endif 720 printf("pfsync_state_import: invalid creator id:" 721 " %08x\n", ntohl(sp->creatorid)); 722 return (EINVAL); 723 } 724 725 if ((kif = pfi_kif_get(sp->ifname)) == NULL) { 726#ifdef __FreeBSD__ 727 if (V_pf_status.debug >= PF_DEBUG_MISC) 728#else 729 if (pf_status.debug >= PF_DEBUG_MISC) 730#endif 731 printf("pfsync_state_import: " 732 "unknown interface: %s\n", sp->ifname); 733 if (flags & PFSYNC_SI_IOCTL) 734 return (EINVAL); 735 return (0); /* skip this state */ 736 } 737 738 /* 739 * If the ruleset checksums match or the state is coming from the ioctl, 740 * it's safe to associate the state with the rule of that number. 
741 */ 742 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && 743 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) < 744 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) 745 r = pf_main_ruleset.rules[ 746 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)]; 747 else 748#ifdef __FreeBSD__ 749 r = &V_pf_default_rule; 750#else 751 r = &pf_default_rule; 752#endif 753 754 if ((r->max_states && r->states_cur >= r->max_states)) 755 goto cleanup; 756 757#ifdef __FreeBSD__ 758 if (flags & PFSYNC_SI_IOCTL) 759 pool_flags = PR_WAITOK | PR_ZERO; 760 else 761 pool_flags = PR_NOWAIT | PR_ZERO; 762 763 if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL) 764 goto cleanup; 765#else 766 if (flags & PFSYNC_SI_IOCTL) 767 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO; 768 else 769 pool_flags = PR_LIMITFAIL | PR_ZERO; 770 771 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL) 772 goto cleanup; 773#endif 774 775 if ((skw = pf_alloc_state_key(pool_flags)) == NULL) 776 goto cleanup; 777 778 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0], 779 &sp->key[PF_SK_STACK].addr[0], sp->af) || 780 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1], 781 &sp->key[PF_SK_STACK].addr[1], sp->af) || 782 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] || 783 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) { 784 if ((sks = pf_alloc_state_key(pool_flags)) == NULL) 785 goto cleanup; 786 } else 787 sks = skw; 788 789 /* allocate memory for scrub info */ 790 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) || 791 pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) 792 goto cleanup; 793 794 /* copy to state key(s) */ 795 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0]; 796 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1]; 797 skw->port[0] = sp->key[PF_SK_WIRE].port[0]; 798 skw->port[1] = sp->key[PF_SK_WIRE].port[1]; 799 skw->proto = sp->proto; 800 skw->af = sp->af; 801 if (sks != skw) { 802 sks->addr[0] = sp->key[PF_SK_STACK].addr[0]; 803 sks->addr[1] = sp->key[PF_SK_STACK].addr[1]; 
804 sks->port[0] = sp->key[PF_SK_STACK].port[0]; 805 sks->port[1] = sp->key[PF_SK_STACK].port[1]; 806 sks->proto = sp->proto; 807 sks->af = sp->af; 808 } 809 810 /* copy to state */ 811 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr)); 812 st->creation = time_second - ntohl(sp->creation); 813 st->expire = time_second; 814 if (sp->expire) { 815 /* XXX No adaptive scaling. */ 816 st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire); 817 } 818 819 st->expire = ntohl(sp->expire) + time_second; 820 st->direction = sp->direction; 821 st->log = sp->log; 822 st->timeout = sp->timeout; 823 st->state_flags = sp->state_flags; 824 825 bcopy(sp->id, &st->id, sizeof(st->id)); 826 st->creatorid = sp->creatorid; 827 pf_state_peer_ntoh(&sp->src, &st->src); 828 pf_state_peer_ntoh(&sp->dst, &st->dst); 829 830 st->rule.ptr = r; 831 st->nat_rule.ptr = NULL; 832 st->anchor.ptr = NULL; 833 st->rt_kif = NULL; 834 835 st->pfsync_time = time_second; 836 st->sync_state = PFSYNC_S_NONE; 837 838 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ 839 r->states_cur++; 840 r->states_tot++; 841 842 if (!ISSET(flags, PFSYNC_SI_IOCTL)) 843 SET(st->state_flags, PFSTATE_NOSYNC); 844 845 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) { 846 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */ 847 r->states_cur--; 848 goto cleanup_state; 849 } 850 851 if (!ISSET(flags, PFSYNC_SI_IOCTL)) { 852 CLR(st->state_flags, PFSTATE_NOSYNC); 853 if (ISSET(st->state_flags, PFSTATE_ACK)) { 854 pfsync_q_ins(st, PFSYNC_S_IACK); 855#ifdef __FreeBSD__ 856 pfsync_sendout(); 857#else 858 schednetisr(NETISR_PFSYNC); 859#endif 860 } 861 } 862 CLR(st->state_flags, PFSTATE_ACK); 863 864 return (0); 865 866cleanup: 867 error = ENOMEM; 868 if (skw == sks) 869 sks = NULL; 870#ifdef __FreeBSD__ 871 if (skw != NULL) 872 pool_put(&V_pf_state_key_pl, skw); 873 if (sks != NULL) 874 pool_put(&V_pf_state_key_pl, sks); 875#else 876 if (skw != NULL) 877 pool_put(&pf_state_key_pl, skw); 878 if (sks 
!= NULL) 879 pool_put(&pf_state_key_pl, sks); 880#endif 881 882cleanup_state: /* pf_state_insert frees the state keys */ 883 if (st) { 884#ifdef __FreeBSD__ 885 if (st->dst.scrub) 886 pool_put(&V_pf_state_scrub_pl, st->dst.scrub); 887 if (st->src.scrub) 888 pool_put(&V_pf_state_scrub_pl, st->src.scrub); 889 pool_put(&V_pf_state_pl, st); 890#else 891 if (st->dst.scrub) 892 pool_put(&pf_state_scrub_pl, st->dst.scrub); 893 if (st->src.scrub) 894 pool_put(&pf_state_scrub_pl, st->src.scrub); 895 pool_put(&pf_state_pl, st); 896#endif 897 } 898 return (error); 899} 900 901void 902#ifdef __FreeBSD__ 903pfsync_input(struct mbuf *m, __unused int off) 904#else 905pfsync_input(struct mbuf *m, ...) 906#endif 907{ 908#ifdef __FreeBSD__ 909 struct pfsync_softc *sc = V_pfsyncif; 910#else 911 struct pfsync_softc *sc = pfsyncif; 912#endif 913 struct pfsync_pkt pkt; 914 struct ip *ip = mtod(m, struct ip *); 915 struct pfsync_header *ph; 916 struct pfsync_subheader subh; 917 918 int offset; 919 int rv; 920 921 V_pfsyncstats.pfsyncs_ipackets++; 922 923 /* verify that we have a sync interface configured */ 924#ifdef __FreeBSD__ 925 if (!sc || !sc->sc_sync_if || !V_pf_status.running) 926#else 927 if (!sc || !sc->sc_sync_if || !pf_status.running) 928#endif 929 goto done; 930 931 /* verify that the packet came in on the right interface */ 932 if (sc->sc_sync_if != m->m_pkthdr.rcvif) { 933 V_pfsyncstats.pfsyncs_badif++; 934 goto done; 935 } 936 937#ifdef __FreeBSD__ 938 sc->sc_ifp->if_ipackets++; 939 sc->sc_ifp->if_ibytes += m->m_pkthdr.len; 940#else 941 sc->sc_if.if_ipackets++; 942 sc->sc_if.if_ibytes += m->m_pkthdr.len; 943#endif 944 /* verify that the IP TTL is 255. 
*/ 945 if (ip->ip_ttl != PFSYNC_DFLTTL) { 946 V_pfsyncstats.pfsyncs_badttl++; 947 goto done; 948 } 949 950 offset = ip->ip_hl << 2; 951 if (m->m_pkthdr.len < offset + sizeof(*ph)) { 952 V_pfsyncstats.pfsyncs_hdrops++; 953 goto done; 954 } 955 956 if (offset + sizeof(*ph) > m->m_len) { 957 if (m_pullup(m, offset + sizeof(*ph)) == NULL) { 958 V_pfsyncstats.pfsyncs_hdrops++; 959 return; 960 } 961 ip = mtod(m, struct ip *); 962 } 963 ph = (struct pfsync_header *)((char *)ip + offset); 964 965 /* verify the version */ 966 if (ph->version != PFSYNC_VERSION) { 967 V_pfsyncstats.pfsyncs_badver++; 968 goto done; 969 } 970 971#if 0 972 if (pfsync_input_hmac(m, offset) != 0) { 973 /* XXX stats */ 974 goto done; 975 } 976#endif 977 978 /* Cheaper to grab this now than having to mess with mbufs later */ 979 pkt.ip = ip; 980 pkt.src = ip->ip_src; 981 pkt.flags = 0; 982 983#ifdef __FreeBSD__ 984 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 985#else 986 if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 987#endif 988 pkt.flags |= PFSYNC_SI_CKSUM; 989 990 offset += sizeof(*ph); 991 for (;;) { 992 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); 993 offset += sizeof(subh); 994 995 if (subh.action >= PFSYNC_ACT_MAX) { 996 V_pfsyncstats.pfsyncs_badact++; 997 goto done; 998 } 999 1000 rv = (*pfsync_acts[subh.action])(&pkt, m, offset, 1001 ntohs(subh.count)); 1002 if (rv == -1) 1003 return; 1004 1005 offset += rv; 1006 } 1007 1008done: 1009 m_freem(m); 1010} 1011 1012int 1013pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1014{ 1015 struct pfsync_clr *clr; 1016 struct mbuf *mp; 1017 int len = sizeof(*clr) * count; 1018 int i, offp; 1019 1020 struct pf_state *st, *nexts; 1021 struct pf_state_key *sk, *nextsk; 1022 struct pf_state_item *si; 1023 u_int32_t creatorid; 1024 int s; 1025 1026 mp = m_pulldown(m, offset, len, &offp); 1027 if (mp == NULL) { 1028 V_pfsyncstats.pfsyncs_badlen++; 1029 return (-1); 1030 } 
1031 clr = (struct pfsync_clr *)(mp->m_data + offp); 1032 1033 s = splsoftnet(); 1034#ifdef __FreeBSD__ 1035 PF_LOCK(); 1036#endif 1037 for (i = 0; i < count; i++) { 1038 creatorid = clr[i].creatorid; 1039 1040 if (clr[i].ifname[0] == '\0') { 1041#ifdef __FreeBSD__ 1042 for (st = RB_MIN(pf_state_tree_id, &V_tree_id); 1043 st; st = nexts) { 1044 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st); 1045#else 1046 for (st = RB_MIN(pf_state_tree_id, &tree_id); 1047 st; st = nexts) { 1048 nexts = RB_NEXT(pf_state_tree_id, &tree_id, st); 1049#endif 1050 if (st->creatorid == creatorid) { 1051 SET(st->state_flags, PFSTATE_NOSYNC); 1052 pf_unlink_state(st); 1053 } 1054 } 1055 } else { 1056 if (pfi_kif_get(clr[i].ifname) == NULL) 1057 continue; 1058 1059 /* XXX correct? */ 1060#ifdef __FreeBSD__ 1061 for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl); 1062#else 1063 for (sk = RB_MIN(pf_state_tree, &pf_statetbl); 1064#endif 1065 sk; sk = nextsk) { 1066 nextsk = RB_NEXT(pf_state_tree, 1067#ifdef __FreeBSD__ 1068 &V_pf_statetbl, sk); 1069#else 1070 &pf_statetbl, sk); 1071#endif 1072 TAILQ_FOREACH(si, &sk->states, entry) { 1073 if (si->s->creatorid == creatorid) { 1074 SET(si->s->state_flags, 1075 PFSTATE_NOSYNC); 1076 pf_unlink_state(si->s); 1077 } 1078 } 1079 } 1080 } 1081 } 1082#ifdef __FreeBSD__ 1083 PF_UNLOCK(); 1084#endif 1085 splx(s); 1086 1087 return (len); 1088} 1089 1090int 1091pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1092{ 1093 struct mbuf *mp; 1094 struct pfsync_state *sa, *sp; 1095 int len = sizeof(*sp) * count; 1096 int i, offp; 1097 1098 int s; 1099 1100 mp = m_pulldown(m, offset, len, &offp); 1101 if (mp == NULL) { 1102 V_pfsyncstats.pfsyncs_badlen++; 1103 return (-1); 1104 } 1105 sa = (struct pfsync_state *)(mp->m_data + offp); 1106 1107 s = splsoftnet(); 1108#ifdef __FreeBSD__ 1109 PF_LOCK(); 1110#endif 1111 for (i = 0; i < count; i++) { 1112 sp = &sa[i]; 1113 1114 /* check for invalid values */ 1115 if (sp->timeout >= PFTM_MAX || 
1116 sp->src.state > PF_TCPS_PROXY_DST || 1117 sp->dst.state > PF_TCPS_PROXY_DST || 1118 sp->direction > PF_OUT || 1119 (sp->af != AF_INET && sp->af != AF_INET6)) { 1120#ifdef __FreeBSD__ 1121 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1122#else 1123 if (pf_status.debug >= PF_DEBUG_MISC) { 1124#endif 1125 printf("pfsync_input: PFSYNC5_ACT_INS: " 1126 "invalid value\n"); 1127 } 1128 V_pfsyncstats.pfsyncs_badval++; 1129 continue; 1130 } 1131 1132 if (pfsync_state_import(sp, pkt->flags) == ENOMEM) { 1133 /* drop out, but process the rest of the actions */ 1134 break; 1135 } 1136 } 1137#ifdef __FreeBSD__ 1138 PF_UNLOCK(); 1139#endif 1140 splx(s); 1141 1142 return (len); 1143} 1144 1145int 1146pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1147{ 1148 struct pfsync_ins_ack *ia, *iaa; 1149 struct pf_state_cmp id_key; 1150 struct pf_state *st; 1151 1152 struct mbuf *mp; 1153 int len = count * sizeof(*ia); 1154 int offp, i; 1155 int s; 1156 1157 mp = m_pulldown(m, offset, len, &offp); 1158 if (mp == NULL) { 1159 V_pfsyncstats.pfsyncs_badlen++; 1160 return (-1); 1161 } 1162 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); 1163 1164 s = splsoftnet(); 1165#ifdef __FreeBSD__ 1166 PF_LOCK(); 1167#endif 1168 for (i = 0; i < count; i++) { 1169 ia = &iaa[i]; 1170 1171 bcopy(&ia->id, &id_key.id, sizeof(id_key.id)); 1172 id_key.creatorid = ia->creatorid; 1173 1174 st = pf_find_state_byid(&id_key); 1175 if (st == NULL) 1176 continue; 1177 1178 if (ISSET(st->state_flags, PFSTATE_ACK)) 1179 pfsync_deferred(st, 0); 1180 } 1181#ifdef __FreeBSD__ 1182 PF_UNLOCK(); 1183#endif 1184 splx(s); 1185 /* 1186 * XXX this is not yet implemented, but we know the size of the 1187 * message so we can skip it. 
1188 */ 1189 1190 return (count * sizeof(struct pfsync_ins_ack)); 1191} 1192 1193int 1194pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src, 1195 struct pfsync_state_peer *dst) 1196{ 1197 int sfail = 0; 1198 1199 /* 1200 * The state should never go backwards except 1201 * for syn-proxy states. Neither should the 1202 * sequence window slide backwards. 1203 */ 1204 if (st->src.state > src->state && 1205 (st->src.state < PF_TCPS_PROXY_SRC || 1206 src->state >= PF_TCPS_PROXY_SRC)) 1207 sfail = 1; 1208 else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo))) 1209 sfail = 3; 1210 else if (st->dst.state > dst->state) { 1211 /* There might still be useful 1212 * information about the src state here, 1213 * so import that part of the update, 1214 * then "fail" so we send the updated 1215 * state back to the peer who is missing 1216 * our what we know. */ 1217 pf_state_peer_ntoh(src, &st->src); 1218 /* XXX do anything with timeouts? */ 1219 sfail = 7; 1220 } else if (st->dst.state >= TCPS_SYN_SENT && 1221 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))) 1222 sfail = 4; 1223 1224 return (sfail); 1225} 1226 1227int 1228pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1229{ 1230 struct pfsync_state *sa, *sp; 1231 struct pf_state_cmp id_key; 1232 struct pf_state_key *sk; 1233 struct pf_state *st; 1234 int sfail; 1235 1236 struct mbuf *mp; 1237 int len = count * sizeof(*sp); 1238 int offp, i; 1239 int s; 1240 1241 mp = m_pulldown(m, offset, len, &offp); 1242 if (mp == NULL) { 1243 V_pfsyncstats.pfsyncs_badlen++; 1244 return (-1); 1245 } 1246 sa = (struct pfsync_state *)(mp->m_data + offp); 1247 1248 s = splsoftnet(); 1249#ifdef __FreeBSD__ 1250 PF_LOCK(); 1251#endif 1252 for (i = 0; i < count; i++) { 1253 sp = &sa[i]; 1254 1255 /* check for invalid values */ 1256 if (sp->timeout >= PFTM_MAX || 1257 sp->src.state > PF_TCPS_PROXY_DST || 1258 sp->dst.state > PF_TCPS_PROXY_DST) { 1259#ifdef __FreeBSD__ 1260 if (V_pf_status.debug >= PF_DEBUG_MISC) { 
1261#else 1262 if (pf_status.debug >= PF_DEBUG_MISC) { 1263#endif 1264 printf("pfsync_input: PFSYNC_ACT_UPD: " 1265 "invalid value\n"); 1266 } 1267 V_pfsyncstats.pfsyncs_badval++; 1268 continue; 1269 } 1270 1271 bcopy(sp->id, &id_key.id, sizeof(id_key.id)); 1272 id_key.creatorid = sp->creatorid; 1273 1274 st = pf_find_state_byid(&id_key); 1275 if (st == NULL) { 1276 /* insert the update */ 1277 if (pfsync_state_import(sp, 0)) 1278 V_pfsyncstats.pfsyncs_badstate++; 1279 continue; 1280 } 1281 1282 if (ISSET(st->state_flags, PFSTATE_ACK)) 1283 pfsync_deferred(st, 1); 1284 1285 sk = st->key[PF_SK_WIRE]; /* XXX right one? */ 1286 sfail = 0; 1287 if (sk->proto == IPPROTO_TCP) 1288 sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst); 1289 else { 1290 /* 1291 * Non-TCP protocol state machine always go 1292 * forwards 1293 */ 1294 if (st->src.state > sp->src.state) 1295 sfail = 5; 1296 else if (st->dst.state > sp->dst.state) 1297 sfail = 6; 1298 } 1299 1300 if (sfail) { 1301#ifdef __FreeBSD__ 1302 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1303#else 1304 if (pf_status.debug >= PF_DEBUG_MISC) { 1305#endif 1306 printf("pfsync: %s stale update (%d)" 1307 " id: %016llx creatorid: %08x\n", 1308 (sfail < 7 ? 
"ignoring" : "partial"), 1309 sfail, betoh64(st->id), 1310 ntohl(st->creatorid)); 1311 } 1312 V_pfsyncstats.pfsyncs_stale++; 1313 1314 pfsync_update_state(st); 1315#ifdef __FreeBSD__ 1316 pfsync_sendout(); 1317#else 1318 schednetisr(NETISR_PFSYNC); 1319#endif 1320 continue; 1321 } 1322 pfsync_alloc_scrub_memory(&sp->dst, &st->dst); 1323 pf_state_peer_ntoh(&sp->src, &st->src); 1324 pf_state_peer_ntoh(&sp->dst, &st->dst); 1325 st->expire = ntohl(sp->expire) + time_second; 1326 st->timeout = sp->timeout; 1327 st->pfsync_time = time_second; 1328 } 1329#ifdef __FreeBSD__ 1330 PF_UNLOCK(); 1331#endif 1332 splx(s); 1333 1334 return (len); 1335} 1336 1337int 1338pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1339{ 1340 struct pfsync_upd_c *ua, *up; 1341 struct pf_state_key *sk; 1342 struct pf_state_cmp id_key; 1343 struct pf_state *st; 1344 1345 int len = count * sizeof(*up); 1346 int sfail; 1347 1348 struct mbuf *mp; 1349 int offp, i; 1350 int s; 1351 1352 mp = m_pulldown(m, offset, len, &offp); 1353 if (mp == NULL) { 1354 V_pfsyncstats.pfsyncs_badlen++; 1355 return (-1); 1356 } 1357 ua = (struct pfsync_upd_c *)(mp->m_data + offp); 1358 1359 s = splsoftnet(); 1360#ifdef __FreeBSD__ 1361 PF_LOCK(); 1362#endif 1363 for (i = 0; i < count; i++) { 1364 up = &ua[i]; 1365 1366 /* check for invalid values */ 1367 if (up->timeout >= PFTM_MAX || 1368 up->src.state > PF_TCPS_PROXY_DST || 1369 up->dst.state > PF_TCPS_PROXY_DST) { 1370#ifdef __FreeBSD__ 1371 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1372#else 1373 if (pf_status.debug >= PF_DEBUG_MISC) { 1374#endif 1375 printf("pfsync_input: " 1376 "PFSYNC_ACT_UPD_C: " 1377 "invalid value\n"); 1378 } 1379 V_pfsyncstats.pfsyncs_badval++; 1380 continue; 1381 } 1382 1383 bcopy(&up->id, &id_key.id, sizeof(id_key.id)); 1384 id_key.creatorid = up->creatorid; 1385 1386 st = pf_find_state_byid(&id_key); 1387 if (st == NULL) { 1388 /* We don't have this state. Ask for it. 
*/ 1389 pfsync_request_update(id_key.creatorid, id_key.id); 1390 continue; 1391 } 1392 1393 if (ISSET(st->state_flags, PFSTATE_ACK)) 1394 pfsync_deferred(st, 1); 1395 1396 sk = st->key[PF_SK_WIRE]; /* XXX right one? */ 1397 sfail = 0; 1398 if (sk->proto == IPPROTO_TCP) 1399 sfail = pfsync_upd_tcp(st, &up->src, &up->dst); 1400 else { 1401 /* 1402 * Non-TCP protocol state machine always go forwards 1403 */ 1404 if (st->src.state > up->src.state) 1405 sfail = 5; 1406 else if (st->dst.state > up->dst.state) 1407 sfail = 6; 1408 } 1409 1410 if (sfail) { 1411#ifdef __FreeBSD__ 1412 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1413#else 1414 if (pf_status.debug >= PF_DEBUG_MISC) { 1415#endif 1416 printf("pfsync: ignoring stale update " 1417 "(%d) id: %016llx " 1418 "creatorid: %08x\n", sfail, 1419 betoh64(st->id), 1420 ntohl(st->creatorid)); 1421 } 1422 V_pfsyncstats.pfsyncs_stale++; 1423 1424 pfsync_update_state(st); 1425#ifdef __FreeBSD__ 1426 pfsync_sendout(); 1427#else 1428 schednetisr(NETISR_PFSYNC); 1429#endif 1430 continue; 1431 } 1432 pfsync_alloc_scrub_memory(&up->dst, &st->dst); 1433 pf_state_peer_ntoh(&up->src, &st->src); 1434 pf_state_peer_ntoh(&up->dst, &st->dst); 1435 st->expire = ntohl(up->expire) + time_second; 1436 st->timeout = up->timeout; 1437 st->pfsync_time = time_second; 1438 } 1439#ifdef __FreeBSD__ 1440 PF_UNLOCK(); 1441#endif 1442 splx(s); 1443 1444 return (len); 1445} 1446 1447int 1448pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1449{ 1450 struct pfsync_upd_req *ur, *ura; 1451 struct mbuf *mp; 1452 int len = count * sizeof(*ur); 1453 int i, offp; 1454 1455 struct pf_state_cmp id_key; 1456 struct pf_state *st; 1457 1458 mp = m_pulldown(m, offset, len, &offp); 1459 if (mp == NULL) { 1460 V_pfsyncstats.pfsyncs_badlen++; 1461 return (-1); 1462 } 1463 ura = (struct pfsync_upd_req *)(mp->m_data + offp); 1464 1465 for (i = 0; i < count; i++) { 1466 ur = &ura[i]; 1467 1468 bcopy(&ur->id, &id_key.id, sizeof(id_key.id)); 1469 
/*
 * Handle a PFSYNC_ACT_DEL message: `count' full-size delete
 * announcements.  Each referenced state is unlinked locally;
 * PFSTATE_NOSYNC is set first so the unlink does not generate a
 * delete message back at the peer.
 *
 * Returns the number of payload bytes consumed, or -1 on a short
 * packet.
 */
int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* Peer deleted a state we never had. */
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
/*
 * Handle a PFSYNC_ACT_BUS (bulk update status) message.  Only
 * relevant while we are waiting for a bulk update we requested
 * (sc_ureq_sent != 0): BUS_START re-arms the bulk-failure timeout,
 * BUS_END with a plausible timestamp ends the bulk phase and lifts
 * the carp demotion.
 *
 * Returns the number of payload bytes consumed, or -1 on a short
 * packet.
 */
int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)
		return (len);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5); /* XXX magic */
#endif
		/* Dead code kept from the original timeout computation. */
#ifdef XXX
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    (PFSYNC_BULKPACKETS * sc->sc_maxcount));
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		/* Only accept an end that postdates our request. */
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			timeout_del(&sc->sc_bulkfail_tmo);
#ifdef __FreeBSD__
			if (!sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->pfsync_sync_ok = 1;
#else
#if NCARP > 0
			if (!pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, -1);
#endif
			pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}

	return (len);
}
/*
 * Handle a PFSYNC_ACT_TDB message: IPsec tdb replay/byte counters
 * from the peer.  Without the IPSEC option the payload is simply
 * skipped.
 *
 * Returns the number of payload bytes consumed, or -1 on a short
 * packet.
 */
int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
#endif

	return (len);
}
/*
 * Interface ioctl handler for pfsync(4).  Supports interface
 * up/down (SIOCSIFFLAGS), MTU changes (SIOCSIFMTU), and getting and
 * setting the pfsync configuration (SIOCGETPFSYNC/SIOCSETPFSYNC:
 * sync device, sync peer, max updates).  On FreeBSD the PF lock is
 * dropped and re-acquired around calls that may sleep or take other
 * locks (ifunit, in_delmulti, in_addmulti) — the exact lock/unlock
 * ordering below is deliberate.
 */
/* ARGSUSED */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#ifndef __FreeBSD__
	struct proc *p = curproc;
#endif
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	struct ip *ip;
	int s, error;

	switch (cmd) {
#if 0
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
#endif
	case SIOCSIFFLAGS:
		/* Mirror IFF_UP into the running flag. */
#ifdef __FreeBSD__
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
		else
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#else
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu <= PFSYNC_MINPKT)
			return (EINVAL);
		if (ifr->ifr_mtu > MCLBYTES) /* XXX could be bigger */
			ifr->ifr_mtu = MCLBYTES;
		if (ifr->ifr_mtu < ifp->if_mtu) {
			/* Shrinking: flush the pending packet first. */
			s = splnet();
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			pfsync_sendout();
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			splx(s);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		/* Export the current configuration to userland. */
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
		/* Privileged operation. */
#ifdef __FreeBSD__
		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
#else
		if ((error = suser(p, p->p_acflag)) != 0)
#endif
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		/* No peer configured means the pfsync multicast group. */
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
#ifdef __FreeBSD__
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
#endif
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
#ifdef __FreeBSD__
		{
			PF_UNLOCK();
#endif
			return (EINVAL);
#ifdef __FreeBSD__
		}
#endif
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			/* Detach from the sync interface. */
			sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
			break;
		}

		/* Lock dropped: ifunit may sleep. */
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		s = splnet();
		/* Flush if the new device forces a smaller packet. */
#ifdef __FreeBSD__
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
#else
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
#endif
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout();
		sc->sc_sync_if = sifp;

		if (imo->imo_num_memberships > 0) {
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			imo->imo_multicast_ifp = NULL;
		}

		if (sc->sc_sync_if &&
#ifdef __FreeBSD__
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
#else
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
#endif
			struct in_addr addr;

			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
				PF_UNLOCK();
#endif
				splx(s);
				return (EADDRNOTAVAIL);
			}

#ifdef __FreeBSD__
			addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			addr.s_addr = INADDR_PFSYNC_GROUP;
#endif

			/* Lock dropped: in_addmulti may sleep. */
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;
				splx(s);
				return (ENOBUFS);
			}
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}

		/* Pre-build the IP header template for outgoing packets. */
		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
#ifdef __FreeBSD__
		ip->ip_off = IP_DF;
#else
		ip->ip_off = htons(IP_DF);
#endif
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
#ifdef __FreeBSD__
			if (sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(V_pfsync_carp_adj,
				    "pfsync bulk start");
			sc->pfsync_sync_ok = 0;
#else
#if NCARP > 0
			if (pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, 1);
#endif
			pfsync_sync_ok = 0;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: requesting bulk update\n");
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulk_fail, V_pfsyncif);
#else
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
			pfsync_request_update(0, 0);
		}
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		splx(s);

		break;

	default:
		return (ENOTTY);
	}

	return (0);
}
/* len and id are set later */ 1967#ifdef __FreeBSD__ 1968 ip->ip_off = IP_DF; 1969#else 1970 ip->ip_off = htons(IP_DF); 1971#endif 1972 ip->ip_ttl = PFSYNC_DFLTTL; 1973 ip->ip_p = IPPROTO_PFSYNC; 1974 ip->ip_src.s_addr = INADDR_ANY; 1975 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr; 1976 1977 if (sc->sc_sync_if) { 1978 /* Request a full state table update. */ 1979 sc->sc_ureq_sent = time_uptime; 1980#ifdef __FreeBSD__ 1981 if (sc->pfsync_sync_ok && carp_demote_adj_p) 1982 (*carp_demote_adj_p)(V_pfsync_carp_adj, 1983 "pfsync bulk start"); 1984 sc->pfsync_sync_ok = 0; 1985#else 1986#if NCARP > 0 1987 if (pfsync_sync_ok) 1988 carp_group_demote_adj(&sc->sc_if, 1); 1989#endif 1990 pfsync_sync_ok = 0; 1991#endif 1992#ifdef __FreeBSD__ 1993 if (V_pf_status.debug >= PF_DEBUG_MISC) 1994#else 1995 if (pf_status.debug >= PF_DEBUG_MISC) 1996#endif 1997 printf("pfsync: requesting bulk update\n"); 1998#ifdef __FreeBSD__ 1999 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, 2000 pfsync_bulk_fail, V_pfsyncif); 2001#else 2002 timeout_add_sec(&sc->sc_bulkfail_tmo, 5); 2003#endif 2004 pfsync_request_update(0, 0); 2005 } 2006#ifdef __FreeBSD__ 2007 PF_UNLOCK(); 2008#endif 2009 splx(s); 2010 2011 break; 2012 2013 default: 2014 return (ENOTTY); 2015 } 2016 2017 return (0); 2018} 2019 2020int 2021pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset) 2022{ 2023 struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset); 2024 2025 pfsync_state_export(sp, st); 2026 2027 return (sizeof(*sp)); 2028} 2029 2030int 2031pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset) 2032{ 2033 struct pfsync_ins_ack *iack = 2034 (struct pfsync_ins_ack *)(m->m_data + offset); 2035 2036 iack->id = st->id; 2037 iack->creatorid = st->creatorid; 2038 2039 return (sizeof(*iack)); 2040} 2041 2042int 2043pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset) 2044{ 2045 struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset); 2046 2047 up->id = st->id; 2048 
/*
 * Discard everything queued for transmission without sending it:
 * clear the per-action state queues, free all pending update
 * requests, drop any custom ("plus") region, and reset the pending
 * packet length to the empty-packet minimum.
 */
void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif
			/* Mark the state as no longer queued. */
			st->sync_state = PFSYNC_S_NONE;
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);
	}

	sc->sc_plus = NULL;

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);

		TAILQ_INIT(&sc->sc_tdb_q);
	}
#endif

	sc->sc_len = PFSYNC_MINPKT;
}
/*
 * Build and transmit one pfsync packet from everything currently
 * queued: IP header (from the template), pfsync header, then one
 * subheader+payload run per non-empty action queue, pending update
 * requests, an optional custom region, and a trailing EOF
 * subheader.  Called with the PF lock held (FreeBSD) or at IPL_NET
 * (OpenBSD).  On FreeBSD the packet is enqueued on the interface
 * send queue and handed to the netisr; on OpenBSD it is sent with
 * ip_output() directly.
 */
void
pfsync_sendout(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
#if NBPFILTER > 0
#ifdef __FreeBSD__
	struct ifnet *ifp = sc->sc_ifp;
#else
	struct ifnet *ifp = &sc->sc_if;
#endif
#endif
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
#ifdef __FreeBSD__
	size_t pktlen;
	int dummy_error;
#endif
	int offset;
	int q, count = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_NET);
#endif

	/* Nothing queued beyond the empty-packet minimum. */
	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
		return;

	/* No listener at all: throw the queued work away. */
#if NBPFILTER > 0
	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
#else
	if (sc->sc_sync_if == NULL) {
#endif
		pfsync_drop(sc);
		return;
	}

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
#ifdef __FreeBSD__
		sc->sc_ifp->if_oerrors++;
#else
		sc->sc_if.if_oerrors++;
#endif
		V_pfsyncstats.pfsyncs_onomem++;
		pfsync_drop(sc);
		return;
	}

	/* Attach external storage if the packet exceeds MHLEN. */
#ifdef __FreeBSD__
	pktlen = max_linkhdr + sc->sc_len;
	if (pktlen > MHLEN) {
		/* Find the right pool to allocate from. */
		/* XXX: This is ugly. */
		m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE :
		    pktlen <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
#else
	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
#endif
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
#ifdef __FreeBSD__
			sc->sc_ifp->if_oerrors++;
#else
			sc->sc_if.if_oerrors++;
#endif
			V_pfsyncstats.pfsyncs_onomem++;
			pfsync_drop(sc);
			return;
		}
	}
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

#ifdef __FreeBSD__
	ip->ip_len = m->m_pkthdr.len;
#else
	ip->ip_len = htons(m->m_pkthdr.len);
#endif
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
#ifdef __FreeBSD__
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#else
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#endif

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif

			/* Per-queue writer serializes the state. */
			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		/* Subheader is filled in after the count is known. */
		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			pool_put(&sc->sc_pool, ur);

			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

		sc->sc_plus = NULL;
	}

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);

			count++;
		}
		TAILQ_INIT(&sc->sc_tdb_q);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);
	}
#endif

	/* Terminating EOF subheader. */
	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);

	/* XXX write checksum in EOF here */

	/* we're done, let's put it on the wire */
#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/* Tap the pfsync payload only, without the IP header. */
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
#ifdef __FreeBSD__
		BPF_MTAP(ifp, m);
#else
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}

	if (sc->sc_sync_if == NULL) {
		/* BPF-only: nothing to transmit. */
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}
#endif

#ifdef __FreeBSD__
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
	schednetisr(NETISR_PFSYNC);
#else
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;

	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
	else
		pfsyncstats.pfsyncs_oerrors++;

	/* start again */
	sc->sc_len = PFSYNC_MINPKT;
#endif
}
/*
 * Called by pf when a new state is created: queue an insert message
 * for it unless the rule or the state is marked no-sync, or the
 * state itself belongs to pfsync traffic.  Arms the periodic send
 * timeout when this is the first thing queued, and forces an
 * immediate send when the state was created from a deferred packet
 * (PFSTATE_ACK).
 */
void
pfsync_insert_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	/* Never sync pfsync's own traffic or no-sync rules. */
	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
		return;
	}

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
		return;

#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif
#endif

	/* First item queued: arm the one-second flush timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	pfsync_q_ins(st, PFSYNC_S_INS);

	if (ISSET(st->state_flags, PFSTATE_ACK))
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	else
		st->sync_updates = 0;
}
/*
 * Defer transmission of the packet that created state `st' until the
 * peer acknowledges the state (or a short timeout fires).  Caps the
 * number of outstanding deferrals at 128 by releasing the oldest.
 *
 * Returns 1 if the packet was taken over (caller must not send it),
 * 0 if no deferral record could be allocated.
 */
int
pfsync_defer(struct pf_state *st, struct mbuf *m)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	/* Bound the deferral queue: transmit the oldest entry. */
	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = pool_get(&sc->sc_pool, M_NOWAIT);
	if (pd == NULL)
		return (0);
	sc->sc_deferred++;

	/* Mark the packet so pf does not re-inspect it on release. */
#ifdef __FreeBSD__
	m->m_flags |= M_SKIP_FIREWALL;
#else
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
#endif
	SET(st->state_flags, PFSTATE_ACK);

	pd->pd_st = st;
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
#ifdef __FreeBSD__
	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
	    pd);
#else
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);
#endif

	return (1);
}
*/ 2502 PF_UNLOCK(); 2503#endif 2504 ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0, 2505 (void *)NULL, (void *)NULL); 2506#ifdef __FreeBSD__ 2507 PF_LOCK(); 2508#endif 2509 splx(s); 2510 } 2511 2512 pool_put(&sc->sc_pool, pd); 2513} 2514 2515void 2516pfsync_defer_tmo(void *arg) 2517{ 2518#if defined(__FreeBSD__) && defined(VIMAGE) 2519 struct pfsync_deferral *pd = arg; 2520#endif 2521 int s; 2522 2523 s = splsoftnet(); 2524#ifdef __FreeBSD__ 2525 CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */ 2526 PF_LOCK(); 2527#endif 2528 pfsync_undefer(arg, 0); 2529#ifdef __FreeBSD__ 2530 PF_UNLOCK(); 2531 CURVNET_RESTORE(); 2532#endif 2533 splx(s); 2534} 2535 2536void 2537pfsync_deferred(struct pf_state *st, int drop) 2538{ 2539#ifdef __FreeBSD__ 2540 struct pfsync_softc *sc = V_pfsyncif; 2541#else 2542 struct pfsync_softc *sc = pfsyncif; 2543#endif 2544 struct pfsync_deferral *pd; 2545 2546 TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) { 2547 if (pd->pd_st == st) { 2548 pfsync_undefer(pd, drop); 2549 return; 2550 } 2551 } 2552 2553 panic("pfsync_send_deferred: unable to find deferred state"); 2554} 2555 2556u_int pfsync_upds = 0; 2557 2558void 2559pfsync_update_state(struct pf_state *st) 2560{ 2561#ifdef __FreeBSD__ 2562 struct pfsync_softc *sc = V_pfsyncif; 2563#else 2564 struct pfsync_softc *sc = pfsyncif; 2565#endif 2566 int sync = 0; 2567 2568#ifdef __FreeBSD__ 2569 PF_LOCK_ASSERT(); 2570#else 2571 splassert(IPL_SOFTNET); 2572#endif 2573 2574 if (sc == NULL) 2575 return; 2576 2577 if (ISSET(st->state_flags, PFSTATE_ACK)) 2578 pfsync_deferred(st, 0); 2579 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) { 2580 if (st->sync_state != PFSYNC_S_NONE) 2581 pfsync_q_del(st); 2582 return; 2583 } 2584 2585 if (sc->sc_len == PFSYNC_MINPKT) 2586#ifdef __FreeBSD__ 2587 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, 2588 V_pfsyncif); 2589#else 2590 timeout_add_sec(&sc->sc_tmo, 1); 2591#endif 2592 2593 switch (st->sync_state) { 2594 case PFSYNC_S_UPD_C: 2595 case 
PFSYNC_S_UPD: 2596 case PFSYNC_S_INS: 2597 /* we're already handling it */ 2598 2599 st->sync_updates++; 2600 if (st->sync_updates >= sc->sc_maxupdates) 2601 sync = 1; 2602 break; 2603 2604 case PFSYNC_S_IACK: 2605 pfsync_q_del(st); 2606 case PFSYNC_S_NONE: 2607 pfsync_q_ins(st, PFSYNC_S_UPD_C); 2608 st->sync_updates = 0; 2609 break; 2610 2611 default: 2612 panic("pfsync_update_state: unexpected sync state %d", 2613 st->sync_state); 2614 } 2615 2616 if (sync || (time_second - st->pfsync_time) < 2) { 2617 pfsync_upds++; 2618#ifdef __FreeBSD__ 2619 pfsync_sendout(); 2620#else 2621 schednetisr(NETISR_PFSYNC); 2622#endif 2623 } 2624} 2625 2626void 2627pfsync_request_update(u_int32_t creatorid, u_int64_t id) 2628{ 2629#ifdef __FreeBSD__ 2630 struct pfsync_softc *sc = V_pfsyncif; 2631#else 2632 struct pfsync_softc *sc = pfsyncif; 2633#endif 2634 struct pfsync_upd_req_item *item; 2635 size_t nlen = sizeof(struct pfsync_upd_req); 2636 int s; 2637 2638 PF_LOCK_ASSERT(); 2639 2640 /* 2641 * this code does nothing to prevent multiple update requests for the 2642 * same state being generated. 
2643 */ 2644 2645 item = pool_get(&sc->sc_pool, PR_NOWAIT); 2646 if (item == NULL) { 2647 /* XXX stats */ 2648 return; 2649 } 2650 2651 item->ur_msg.id = id; 2652 item->ur_msg.creatorid = creatorid; 2653 2654 if (TAILQ_EMPTY(&sc->sc_upd_req_list)) 2655 nlen += sizeof(struct pfsync_subheader); 2656 2657#ifdef __FreeBSD__ 2658 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) { 2659#else 2660 if (sc->sc_len + nlen > sc->sc_if.if_mtu) { 2661#endif 2662 s = splnet(); 2663 pfsync_sendout(); 2664 splx(s); 2665 2666 nlen = sizeof(struct pfsync_subheader) + 2667 sizeof(struct pfsync_upd_req); 2668 } 2669 2670 TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry); 2671 sc->sc_len += nlen; 2672 2673#ifdef __FreeBSD__ 2674 pfsync_sendout(); 2675#else 2676 schednetisr(NETISR_PFSYNC); 2677#endif 2678} 2679 2680void 2681pfsync_update_state_req(struct pf_state *st) 2682{ 2683#ifdef __FreeBSD__ 2684 struct pfsync_softc *sc = V_pfsyncif; 2685#else 2686 struct pfsync_softc *sc = pfsyncif; 2687#endif 2688 2689 PF_LOCK_ASSERT(); 2690 2691 if (sc == NULL) 2692 panic("pfsync_update_state_req: nonexistant instance"); 2693 2694 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) { 2695 if (st->sync_state != PFSYNC_S_NONE) 2696 pfsync_q_del(st); 2697 return; 2698 } 2699 2700 switch (st->sync_state) { 2701 case PFSYNC_S_UPD_C: 2702 case PFSYNC_S_IACK: 2703 pfsync_q_del(st); 2704 case PFSYNC_S_NONE: 2705 pfsync_q_ins(st, PFSYNC_S_UPD); 2706#ifdef __FreeBSD__ 2707 pfsync_sendout(); 2708#else 2709 schednetisr(NETISR_PFSYNC); 2710#endif 2711 return; 2712 2713 case PFSYNC_S_INS: 2714 case PFSYNC_S_UPD: 2715 case PFSYNC_S_DEL: 2716 /* we're already handling it */ 2717 return; 2718 2719 default: 2720 panic("pfsync_update_state_req: unexpected sync state %d", 2721 st->sync_state); 2722 } 2723} 2724 2725void 2726pfsync_delete_state(struct pf_state *st) 2727{ 2728#ifdef __FreeBSD__ 2729 struct pfsync_softc *sc = V_pfsyncif; 2730#else 2731 struct pfsync_softc *sc = pfsyncif; 2732#endif 2733 2734#ifdef __FreeBSD__ 
2735 PF_LOCK_ASSERT(); 2736#else 2737 splassert(IPL_SOFTNET); 2738#endif 2739 2740 if (sc == NULL) 2741 return; 2742 2743 if (ISSET(st->state_flags, PFSTATE_ACK)) 2744 pfsync_deferred(st, 1); 2745 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) { 2746 if (st->sync_state != PFSYNC_S_NONE) 2747 pfsync_q_del(st); 2748 return; 2749 } 2750 2751 if (sc->sc_len == PFSYNC_MINPKT) 2752#ifdef __FreeBSD__ 2753 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, 2754 V_pfsyncif); 2755#else 2756 timeout_add_sec(&sc->sc_tmo, 1); 2757#endif 2758 2759 switch (st->sync_state) { 2760 case PFSYNC_S_INS: 2761 /* we never got to tell the world so just forget about it */ 2762 pfsync_q_del(st); 2763 return; 2764 2765 case PFSYNC_S_UPD_C: 2766 case PFSYNC_S_UPD: 2767 case PFSYNC_S_IACK: 2768 pfsync_q_del(st); 2769 /* FALLTHROUGH to putting it on the del list */ 2770 2771 case PFSYNC_S_NONE: 2772 pfsync_q_ins(st, PFSYNC_S_DEL); 2773 return; 2774 2775 default: 2776 panic("pfsync_delete_state: unexpected sync state %d", 2777 st->sync_state); 2778 } 2779} 2780 2781void 2782pfsync_clear_states(u_int32_t creatorid, const char *ifname) 2783{ 2784 struct { 2785 struct pfsync_subheader subh; 2786 struct pfsync_clr clr; 2787 } __packed r; 2788 2789#ifdef __FreeBSD__ 2790 struct pfsync_softc *sc = V_pfsyncif; 2791#else 2792 struct pfsync_softc *sc = pfsyncif; 2793#endif 2794 2795#ifdef __FreeBSD__ 2796 PF_LOCK_ASSERT(); 2797#else 2798 splassert(IPL_SOFTNET); 2799#endif 2800 2801 if (sc == NULL) 2802 return; 2803 2804 bzero(&r, sizeof(r)); 2805 2806 r.subh.action = PFSYNC_ACT_CLR; 2807 r.subh.count = htons(1); 2808 2809 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname)); 2810 r.clr.creatorid = creatorid; 2811 2812 pfsync_send_plus(&r, sizeof(r)); 2813} 2814 2815void 2816pfsync_q_ins(struct pf_state *st, int q) 2817{ 2818#ifdef __FreeBSD__ 2819 struct pfsync_softc *sc = V_pfsyncif; 2820#else 2821 struct pfsync_softc *sc = pfsyncif; 2822#endif 2823 size_t nlen = pfsync_qs[q].len; 2824 int s; 2825 2826 
PF_LOCK_ASSERT(); 2827 2828#ifdef __FreeBSD__ 2829 KASSERT(st->sync_state == PFSYNC_S_NONE, 2830 ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__)); 2831#else 2832 KASSERT(st->sync_state == PFSYNC_S_NONE); 2833#endif 2834 2835#if 1 || defined(PFSYNC_DEBUG) 2836 if (sc->sc_len < PFSYNC_MINPKT) 2837#ifdef __FreeBSD__ 2838 panic("pfsync pkt len is too low %zu", sc->sc_len); 2839#else 2840 panic("pfsync pkt len is too low %d", sc->sc_len); 2841#endif 2842#endif 2843 if (TAILQ_EMPTY(&sc->sc_qs[q])) 2844 nlen += sizeof(struct pfsync_subheader); 2845 2846#ifdef __FreeBSD__ 2847 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) { 2848#else 2849 if (sc->sc_len + nlen > sc->sc_if.if_mtu) { 2850#endif 2851 s = splnet(); 2852 pfsync_sendout(); 2853 splx(s); 2854 2855 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len; 2856 } 2857 2858 sc->sc_len += nlen; 2859 TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list); 2860 st->sync_state = q; 2861} 2862 2863void 2864pfsync_q_del(struct pf_state *st) 2865{ 2866#ifdef __FreeBSD__ 2867 struct pfsync_softc *sc = V_pfsyncif; 2868#else 2869 struct pfsync_softc *sc = pfsyncif; 2870#endif 2871 int q = st->sync_state; 2872 2873#ifdef __FreeBSD__ 2874 KASSERT(st->sync_state != PFSYNC_S_NONE, 2875 ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__)); 2876#else 2877 KASSERT(st->sync_state != PFSYNC_S_NONE); 2878#endif 2879 2880 sc->sc_len -= pfsync_qs[q].len; 2881 TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list); 2882 st->sync_state = PFSYNC_S_NONE; 2883 2884 if (TAILQ_EMPTY(&sc->sc_qs[q])) 2885 sc->sc_len -= sizeof(struct pfsync_subheader); 2886} 2887 2888#ifdef notyet 2889void 2890pfsync_update_tdb(struct tdb *t, int output) 2891{ 2892#ifdef __FreeBSD__ 2893 struct pfsync_softc *sc = V_pfsyncif; 2894#else 2895 struct pfsync_softc *sc = pfsyncif; 2896#endif 2897 size_t nlen = sizeof(struct pfsync_tdb); 2898 int s; 2899 2900 if (sc == NULL) 2901 return; 2902 2903 if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) { 2904 if (TAILQ_EMPTY(&sc->sc_tdb_q)) 2905 
nlen += sizeof(struct pfsync_subheader); 2906 2907 if (sc->sc_len + nlen > sc->sc_if.if_mtu) { 2908 s = splnet(); 2909 PF_LOCK(); 2910 pfsync_sendout(); 2911 PF_UNLOCK(); 2912 splx(s); 2913 2914 nlen = sizeof(struct pfsync_subheader) + 2915 sizeof(struct pfsync_tdb); 2916 } 2917 2918 sc->sc_len += nlen; 2919 TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry); 2920 SET(t->tdb_flags, TDBF_PFSYNC); 2921 t->tdb_updates = 0; 2922 } else { 2923 if (++t->tdb_updates >= sc->sc_maxupdates) 2924 schednetisr(NETISR_PFSYNC); 2925 } 2926 2927 if (output) 2928 SET(t->tdb_flags, TDBF_PFSYNC_RPL); 2929 else 2930 CLR(t->tdb_flags, TDBF_PFSYNC_RPL); 2931} 2932 2933void 2934pfsync_delete_tdb(struct tdb *t) 2935{ 2936#ifdef __FreeBSD__ 2937 struct pfsync_softc *sc = V_pfsyncif; 2938#else 2939 struct pfsync_softc *sc = pfsyncif; 2940#endif 2941 2942 if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC)) 2943 return; 2944 2945 sc->sc_len -= sizeof(struct pfsync_tdb); 2946 TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry); 2947 CLR(t->tdb_flags, TDBF_PFSYNC); 2948 2949 if (TAILQ_EMPTY(&sc->sc_tdb_q)) 2950 sc->sc_len -= sizeof(struct pfsync_subheader); 2951} 2952 2953int 2954pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset) 2955{ 2956 struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset); 2957 2958 bzero(ut, sizeof(*ut)); 2959 ut->spi = t->tdb_spi; 2960 bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst)); 2961 /* 2962 * When a failover happens, the master's rpl is probably above 2963 * what we see here (we may be up to a second late), so 2964 * increase it a bit for outbound tdbs to manage most such 2965 * situations. 2966 * 2967 * For now, just add an offset that is likely to be larger 2968 * than the number of packets we can see in one second. The RFC 2969 * just says the next packet must have a higher seq value. 2970 * 2971 * XXX What is a good algorithm for this? We could use 2972 * a rate-determined increase, but to know it, we would have 2973 * to extend struct tdb. 
2974 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb 2975 * will soon be replaced anyway. For now, just don't handle 2976 * this edge case. 2977 */ 2978#define RPL_INCR 16384 2979 ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ? 2980 RPL_INCR : 0)); 2981 ut->cur_bytes = htobe64(t->tdb_cur_bytes); 2982 ut->sproto = t->tdb_sproto; 2983 2984 return (sizeof(*ut)); 2985} 2986#endif 2987 2988void 2989pfsync_bulk_start(void) 2990{ 2991#ifdef __FreeBSD__ 2992 struct pfsync_softc *sc = V_pfsyncif; 2993#else 2994 struct pfsync_softc *sc = pfsyncif; 2995#endif 2996 2997#ifdef __FreeBSD__ 2998 if (V_pf_status.debug >= PF_DEBUG_MISC) 2999#else 3000 if (pf_status.debug >= PF_DEBUG_MISC) 3001#endif 3002 printf("pfsync: received bulk update request\n"); 3003 3004#ifdef __FreeBSD__ 3005 PF_LOCK(); 3006 if (TAILQ_EMPTY(&V_state_list)) 3007#else 3008 if (TAILQ_EMPTY(&state_list)) 3009#endif 3010 pfsync_bulk_status(PFSYNC_BUS_END); 3011 else { 3012 sc->sc_ureq_received = time_uptime; 3013 if (sc->sc_bulk_next == NULL) 3014#ifdef __FreeBSD__ 3015 sc->sc_bulk_next = TAILQ_FIRST(&V_state_list); 3016#else 3017 sc->sc_bulk_next = TAILQ_FIRST(&state_list); 3018#endif 3019 sc->sc_bulk_last = sc->sc_bulk_next; 3020 3021 pfsync_bulk_status(PFSYNC_BUS_START); 3022 callout_reset(&sc->sc_bulk_tmo, 1, 3023 pfsync_bulk_update, sc); 3024 } 3025#ifdef __FreeBSD__ 3026 PF_UNLOCK(); 3027#endif 3028} 3029 3030void 3031pfsync_bulk_update(void *arg) 3032{ 3033 struct pfsync_softc *sc = arg; 3034 struct pf_state *st = sc->sc_bulk_next; 3035 int i = 0; 3036 int s; 3037 3038 PF_LOCK_ASSERT(); 3039 3040 s = splsoftnet(); 3041#ifdef __FreeBSD__ 3042 CURVNET_SET(sc->sc_ifp->if_vnet); 3043#endif 3044 for (;;) { 3045 if (st->sync_state == PFSYNC_S_NONE && 3046 st->timeout < PFTM_MAX && 3047 st->pfsync_time <= sc->sc_ureq_received) { 3048 pfsync_update_state_req(st); 3049 i++; 3050 } 3051 3052 st = TAILQ_NEXT(st, entry_list); 3053 if (st == NULL) 3054#ifdef __FreeBSD__ 3055 st = 
TAILQ_FIRST(&V_state_list); 3056#else 3057 st = TAILQ_FIRST(&state_list); 3058#endif 3059 3060 if (st == sc->sc_bulk_last) { 3061 /* we're done */ 3062 sc->sc_bulk_next = NULL; 3063 sc->sc_bulk_last = NULL; 3064 pfsync_bulk_status(PFSYNC_BUS_END); 3065 break; 3066 } 3067 3068#ifdef __FreeBSD__ 3069 if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) < 3070#else 3071 if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) < 3072#endif 3073 sizeof(struct pfsync_state)) { 3074 /* we've filled a packet */ 3075 sc->sc_bulk_next = st; 3076#ifdef __FreeBSD__ 3077 callout_reset(&sc->sc_bulk_tmo, 1, 3078 pfsync_bulk_update, sc); 3079#else 3080 timeout_add(&sc->sc_bulk_tmo, 1); 3081#endif 3082 break; 3083 } 3084 } 3085 3086#ifdef __FreeBSD__ 3087 CURVNET_RESTORE(); 3088#endif 3089 splx(s); 3090} 3091 3092void 3093pfsync_bulk_status(u_int8_t status) 3094{ 3095 struct { 3096 struct pfsync_subheader subh; 3097 struct pfsync_bus bus; 3098 } __packed r; 3099 3100#ifdef __FreeBSD__ 3101 struct pfsync_softc *sc = V_pfsyncif; 3102#else 3103 struct pfsync_softc *sc = pfsyncif; 3104#endif 3105 3106 PF_LOCK_ASSERT(); 3107 3108 bzero(&r, sizeof(r)); 3109 3110 r.subh.action = PFSYNC_ACT_BUS; 3111 r.subh.count = htons(1); 3112 3113#ifdef __FreeBSD__ 3114 r.bus.creatorid = V_pf_status.hostid; 3115#else 3116 r.bus.creatorid = pf_status.hostid; 3117#endif 3118 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received); 3119 r.bus.status = status; 3120 3121 pfsync_send_plus(&r, sizeof(r)); 3122} 3123 3124void 3125pfsync_bulk_fail(void *arg) 3126{ 3127 struct pfsync_softc *sc = arg; 3128 3129#ifdef __FreeBSD__ 3130 CURVNET_SET(sc->sc_ifp->if_vnet); 3131#endif 3132 3133 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { 3134 /* Try again */ 3135#ifdef __FreeBSD__ 3136 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, 3137 pfsync_bulk_fail, V_pfsyncif); 3138#else 3139 timeout_add_sec(&sc->sc_bulkfail_tmo, 5); 3140#endif 3141 PF_LOCK(); 3142 pfsync_request_update(0, 0); 3143 PF_UNLOCK(); 3144 } else { 3145 /* Pretend 
like the transfer was ok */ 3146 sc->sc_ureq_sent = 0; 3147 sc->sc_bulk_tries = 0; 3148#ifdef __FreeBSD__ 3149 if (!sc->pfsync_sync_ok && carp_demote_adj_p) 3150 (*carp_demote_adj_p)(-V_pfsync_carp_adj, 3151 "pfsync bulk fail"); 3152 sc->pfsync_sync_ok = 1; 3153#else 3154#if NCARP > 0 3155 if (!pfsync_sync_ok) 3156 carp_group_demote_adj(&sc->sc_if, -1); 3157#endif 3158 pfsync_sync_ok = 1; 3159#endif 3160#ifdef __FreeBSD__ 3161 if (V_pf_status.debug >= PF_DEBUG_MISC) 3162#else 3163 if (pf_status.debug >= PF_DEBUG_MISC) 3164#endif 3165 printf("pfsync: failed to receive bulk update\n"); 3166 } 3167 3168#ifdef __FreeBSD__ 3169 CURVNET_RESTORE(); 3170#endif 3171} 3172 3173void 3174pfsync_send_plus(void *plus, size_t pluslen) 3175{ 3176#ifdef __FreeBSD__ 3177 struct pfsync_softc *sc = V_pfsyncif; 3178#else 3179 struct pfsync_softc *sc = pfsyncif; 3180#endif 3181 int s; 3182 3183 PF_LOCK_ASSERT(); 3184 3185#ifdef __FreeBSD__ 3186 if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) { 3187#else 3188 if (sc->sc_len + pluslen > sc->sc_if.if_mtu) { 3189#endif 3190 s = splnet(); 3191 pfsync_sendout(); 3192 splx(s); 3193 } 3194 3195 sc->sc_plus = plus; 3196 sc->sc_len += (sc->sc_pluslen = pluslen); 3197 3198 s = splnet(); 3199 pfsync_sendout(); 3200 splx(s); 3201} 3202 3203int 3204pfsync_up(void) 3205{ 3206#ifdef __FreeBSD__ 3207 struct pfsync_softc *sc = V_pfsyncif; 3208#else 3209 struct pfsync_softc *sc = pfsyncif; 3210#endif 3211 3212#ifdef __FreeBSD__ 3213 if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING)) 3214#else 3215 if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING)) 3216#endif 3217 return (0); 3218 3219 return (1); 3220} 3221 3222int 3223pfsync_state_in_use(struct pf_state *st) 3224{ 3225#ifdef __FreeBSD__ 3226 struct pfsync_softc *sc = V_pfsyncif; 3227#else 3228 struct pfsync_softc *sc = pfsyncif; 3229#endif 3230 3231 if (sc == NULL) 3232 return (0); 3233 3234 if (st->sync_state != PFSYNC_S_NONE || 3235 st == sc->sc_bulk_next || 3236 st == 
sc->sc_bulk_last) 3237 return (1); 3238 3239 return (0); 3240} 3241 3242u_int pfsync_ints; 3243u_int pfsync_tmos; 3244 3245void 3246pfsync_timeout(void *arg) 3247{ 3248#if defined(__FreeBSD__) && defined(VIMAGE) 3249 struct pfsync_softc *sc = arg; 3250#endif 3251 int s; 3252 3253#ifdef __FreeBSD__ 3254 CURVNET_SET(sc->sc_ifp->if_vnet); 3255#endif 3256 3257 pfsync_tmos++; 3258 3259 s = splnet(); 3260#ifdef __FreeBSD__ 3261 PF_LOCK(); 3262#endif 3263 pfsync_sendout(); 3264#ifdef __FreeBSD__ 3265 PF_UNLOCK(); 3266#endif 3267 splx(s); 3268 3269#ifdef __FreeBSD__ 3270 CURVNET_RESTORE(); 3271#endif 3272} 3273 3274/* this is a softnet/netisr handler */ 3275void 3276#ifdef __FreeBSD__ 3277pfsyncintr(void *arg) 3278{ 3279 struct pfsync_softc *sc = arg; 3280 struct mbuf *m, *n; 3281 3282 CURVNET_SET(sc->sc_ifp->if_vnet); 3283 pfsync_ints++; 3284 3285 IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m); 3286 3287 for (; m != NULL; m = n) { 3288 3289 n = m->m_nextpkt; 3290 m->m_nextpkt = NULL; 3291 if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) 3292 == 0) 3293 V_pfsyncstats.pfsyncs_opackets++; 3294 else 3295 V_pfsyncstats.pfsyncs_oerrors++; 3296 } 3297 CURVNET_RESTORE(); 3298} 3299#else 3300pfsyncintr(void) 3301{ 3302 int s; 3303 3304 pfsync_ints++; 3305 3306 s = splnet(); 3307 pfsync_sendout(); 3308 splx(s); 3309} 3310#endif 3311 3312int 3313pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 3314 size_t newlen) 3315{ 3316 3317#ifdef notyet 3318 /* All sysctl names at this level are terminal. 
*/ 3319 if (namelen != 1) 3320 return (ENOTDIR); 3321 3322 switch (name[0]) { 3323 case PFSYNCCTL_STATS: 3324 if (newp != NULL) 3325 return (EPERM); 3326 return (sysctl_struct(oldp, oldlenp, newp, newlen, 3327 &V_pfsyncstats, sizeof(V_pfsyncstats))); 3328 } 3329#endif 3330 return (ENOPROTOOPT); 3331} 3332 3333#ifdef __FreeBSD__ 3334void 3335pfsync_ifdetach(void *arg, struct ifnet *ifp) 3336{ 3337 struct pfsync_softc *sc = (struct pfsync_softc *)arg; 3338 struct ip_moptions *imo; 3339 3340 if (sc == NULL || sc->sc_sync_if != ifp) 3341 return; /* not for us; unlocked read */ 3342 3343 CURVNET_SET(sc->sc_ifp->if_vnet); 3344 3345 PF_LOCK(); 3346 3347 /* Deal with a member interface going away from under us. */ 3348 sc->sc_sync_if = NULL; 3349 imo = &sc->sc_imo; 3350 if (imo->imo_num_memberships > 0) { 3351 KASSERT(imo->imo_num_memberships == 1, 3352 ("%s: imo_num_memberships != 1", __func__)); 3353 /* 3354 * Our event handler is always called after protocol 3355 * domains have been detached from the underlying ifnet. 3356 * Do not call in_delmulti(); we held a single reference 3357 * which the protocol domain has purged in in_purgemaddrs(). 
3358 */ 3359 PF_UNLOCK(); 3360 imo->imo_membership[--imo->imo_num_memberships] = NULL; 3361 PF_LOCK(); 3362 imo->imo_multicast_ifp = NULL; 3363 } 3364 3365 PF_UNLOCK(); 3366 3367 CURVNET_RESTORE(); 3368} 3369 3370static int 3371vnet_pfsync_init(const void *unused) 3372{ 3373 int error = 0; 3374 3375 pfsyncattach(0); 3376 3377 error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif, 3378 SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie); 3379 if (error) 3380 panic("%s: swi_add %d", __func__, error); 3381 3382 PF_LOCK(); 3383 pfsync_state_import_ptr = pfsync_state_import; 3384 pfsync_up_ptr = pfsync_up; 3385 pfsync_insert_state_ptr = pfsync_insert_state; 3386 pfsync_update_state_ptr = pfsync_update_state; 3387 pfsync_delete_state_ptr = pfsync_delete_state; 3388 pfsync_clear_states_ptr = pfsync_clear_states; 3389 pfsync_state_in_use_ptr = pfsync_state_in_use; 3390 pfsync_defer_ptr = pfsync_defer; 3391 PF_UNLOCK(); 3392 3393 return (0); 3394} 3395 3396static int 3397vnet_pfsync_uninit(const void *unused) 3398{ 3399 3400 swi_remove(pfsync_swi.pfsync_swi_cookie); 3401 3402 PF_LOCK(); 3403 pfsync_state_import_ptr = NULL; 3404 pfsync_up_ptr = NULL; 3405 pfsync_insert_state_ptr = NULL; 3406 pfsync_update_state_ptr = NULL; 3407 pfsync_delete_state_ptr = NULL; 3408 pfsync_clear_states_ptr = NULL; 3409 pfsync_state_in_use_ptr = NULL; 3410 pfsync_defer_ptr = NULL; 3411 PF_UNLOCK(); 3412 3413 if_clone_detach(&pfsync_cloner); 3414 3415 return (0); 3416} 3417 3418/* Define startup order. */ 3419#define PFSYNC_SYSINIT_ORDER SI_SUB_PROTO_IF 3420#define PFSYNC_MODEVENT_ORDER (SI_ORDER_FIRST) /* On boot slot in here. */ 3421#define PFSYNC_VNET_ORDER (PFSYNC_MODEVENT_ORDER + 2) /* Later still. */ 3422 3423/* 3424 * Starting up. 3425 * VNET_SYSINIT is called for each existing vnet and each new vnet. 3426 */ 3427VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER, 3428 vnet_pfsync_init, NULL); 3429 3430/* 3431 * Closing up shop. 
These are done in REVERSE ORDER, 3432 * Not called on reboot. 3433 * VNET_SYSUNINIT is called for each exiting vnet as it exits. 3434 */ 3435VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER, 3436 vnet_pfsync_uninit, NULL); 3437static int 3438pfsync_modevent(module_t mod, int type, void *data) 3439{ 3440 int error = 0; 3441 3442 switch (type) { 3443 case MOD_LOAD: 3444#ifndef __FreeBSD__ 3445 pfsyncattach(0); 3446#endif 3447 break; 3448 case MOD_UNLOAD: 3449#ifndef __FreeBSD__ 3450 if_clone_detach(&pfsync_cloner); 3451#endif 3452 break; 3453 default: 3454 error = EINVAL; 3455 break; 3456 } 3457 3458 return error; 3459} 3460 3461static moduledata_t pfsync_mod = { 3462 "pfsync", 3463 pfsync_modevent, 3464 0 3465}; 3466 3467#define PFSYNC_MODVER 1 3468 3469DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 3470MODULE_VERSION(pfsync, PFSYNC_MODVER); 3471MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER); 3472#endif /* __FreeBSD__ */ 3473