/* if_pfsync.c — FreeBSD stable/9 revision 229770 */
1/* $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ */ 2 3/* 4 * Copyright (c) 2002 Michael Shalayeff 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 26 * THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* 30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org> 31 * 32 * Permission to use, copy, modify, and distribute this software for any 33 * purpose with or without fee is hereby granted, provided that the above 34 * copyright notice and this permission notice appear in all copies. 35 * 36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 38 * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 43 */ 44 45/* 46 * Revisions picked from OpenBSD after revision 1.110 import: 47 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates 48 * 1.120, 1.175 - use monotonic time_uptime 49 * 1.122 - reduce number of updates for non-TCP sessions 50 */ 51 52#ifdef __FreeBSD__ 53#include "opt_inet.h" 54#include "opt_inet6.h" 55#include "opt_pf.h" 56 57#include <sys/cdefs.h> 58__FBSDID("$FreeBSD: stable/9/sys/contrib/pf/net/if_pfsync.c 229770 2012-01-07 11:01:35Z glebius $"); 59 60#define NBPFILTER 1 61 62#ifdef DEV_PFSYNC 63#define NPFSYNC DEV_PFSYNC 64#else 65#define NPFSYNC 0 66#endif 67 68#ifdef DEV_CARP 69#define NCARP DEV_CARP 70#else 71#define NCARP 0 72#endif 73#endif /* __FreeBSD__ */ 74 75#include <sys/param.h> 76#include <sys/kernel.h> 77#ifdef __FreeBSD__ 78#include <sys/bus.h> 79#include <sys/interrupt.h> 80#include <sys/priv.h> 81#endif 82#include <sys/proc.h> 83#include <sys/systm.h> 84#include <sys/time.h> 85#include <sys/mbuf.h> 86#include <sys/socket.h> 87#ifdef __FreeBSD__ 88#include <sys/endian.h> 89#include <sys/malloc.h> 90#include <sys/module.h> 91#include <sys/sockio.h> 92#include <sys/taskqueue.h> 93#include <sys/lock.h> 94#include <sys/mutex.h> 95#else 96#include <sys/ioctl.h> 97#include <sys/timeout.h> 98#endif 99#include <sys/sysctl.h> 100#ifndef __FreeBSD__ 101#include <sys/pool.h> 102#endif 103 104#include <net/if.h> 105#ifdef __FreeBSD__ 106#include <net/if_clone.h> 107#endif 108#include <net/if_types.h> 109#include <net/route.h> 110#include <net/bpf.h> 111#include <net/netisr.h> 112#ifdef __FreeBSD__ 113#include <net/vnet.h> 114#endif 115 116#include <netinet/in.h> 117#include <netinet/if_ether.h> 
118#include <netinet/tcp.h> 119#include <netinet/tcp_seq.h> 120 121#ifdef INET 122#include <netinet/in_systm.h> 123#include <netinet/in_var.h> 124#include <netinet/ip.h> 125#include <netinet/ip_var.h> 126#endif 127 128#ifdef INET6 129#include <netinet6/nd6.h> 130#endif /* INET6 */ 131 132#ifndef __FreeBSD__ 133#include "carp.h" 134#endif 135#if NCARP > 0 136#include <netinet/ip_carp.h> 137#endif 138 139#include <net/pfvar.h> 140#include <net/if_pfsync.h> 141 142#ifndef __FreeBSD__ 143#include "bpfilter.h" 144#include "pfsync.h" 145#endif 146 147#define PFSYNC_MINPKT ( \ 148 sizeof(struct ip) + \ 149 sizeof(struct pfsync_header) + \ 150 sizeof(struct pfsync_subheader) + \ 151 sizeof(struct pfsync_eof)) 152 153struct pfsync_pkt { 154 struct ip *ip; 155 struct in_addr src; 156 u_int8_t flags; 157}; 158 159int pfsync_input_hmac(struct mbuf *, int); 160 161int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *, 162 struct pfsync_state_peer *); 163 164int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int); 165int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int); 166int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int); 167int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int); 168int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int); 169int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int); 170int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int); 171int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int); 172int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int); 173int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int); 174int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int); 175 176int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int); 177 178int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = { 179 pfsync_in_clr, /* PFSYNC_ACT_CLR */ 180 pfsync_in_ins, /* PFSYNC_ACT_INS */ 181 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ 
182 pfsync_in_upd, /* PFSYNC_ACT_UPD */ 183 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */ 184 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */ 185 pfsync_in_del, /* PFSYNC_ACT_DEL */ 186 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */ 187 pfsync_in_error, /* PFSYNC_ACT_INS_F */ 188 pfsync_in_error, /* PFSYNC_ACT_DEL_F */ 189 pfsync_in_bus, /* PFSYNC_ACT_BUS */ 190 pfsync_in_tdb, /* PFSYNC_ACT_TDB */ 191 pfsync_in_eof /* PFSYNC_ACT_EOF */ 192}; 193 194struct pfsync_q { 195 int (*write)(struct pf_state *, struct mbuf *, int); 196 size_t len; 197 u_int8_t action; 198}; 199 200/* we have one of these for every PFSYNC_S_ */ 201int pfsync_out_state(struct pf_state *, struct mbuf *, int); 202int pfsync_out_iack(struct pf_state *, struct mbuf *, int); 203int pfsync_out_upd_c(struct pf_state *, struct mbuf *, int); 204int pfsync_out_del(struct pf_state *, struct mbuf *, int); 205 206struct pfsync_q pfsync_qs[] = { 207 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS }, 208 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK }, 209 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD }, 210 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C }, 211 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C } 212}; 213 214void pfsync_q_ins(struct pf_state *, int); 215void pfsync_q_del(struct pf_state *); 216 217struct pfsync_upd_req_item { 218 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry; 219 struct pfsync_upd_req ur_msg; 220}; 221TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item); 222 223struct pfsync_deferral { 224 TAILQ_ENTRY(pfsync_deferral) pd_entry; 225 struct pf_state *pd_st; 226 struct mbuf *pd_m; 227#ifdef __FreeBSD__ 228 struct callout pd_tmo; 229#else 230 struct timeout pd_tmo; 231#endif 232}; 233TAILQ_HEAD(pfsync_deferrals, pfsync_deferral); 234 235#define PFSYNC_PLSIZE MAX(sizeof(struct pfsync_upd_req_item), \ 236 sizeof(struct pfsync_deferral)) 237 238#ifdef notyet 239int pfsync_out_tdb(struct tdb *, struct mbuf *, int); 
240#endif 241 242struct pfsync_softc { 243#ifdef __FreeBSD__ 244 struct ifnet *sc_ifp; 245#else 246 struct ifnet sc_if; 247#endif 248 struct ifnet *sc_sync_if; 249 250#ifdef __FreeBSD__ 251 uma_zone_t sc_pool; 252#else 253 struct pool sc_pool; 254#endif 255 256 struct ip_moptions sc_imo; 257 258 struct in_addr sc_sync_peer; 259 u_int8_t sc_maxupdates; 260#ifdef __FreeBSD__ 261 int pfsync_sync_ok; 262#endif 263 264 struct ip sc_template; 265 266 struct pf_state_queue sc_qs[PFSYNC_S_COUNT]; 267 size_t sc_len; 268 269 struct pfsync_upd_reqs sc_upd_req_list; 270 271 struct pfsync_deferrals sc_deferrals; 272 u_int sc_deferred; 273 274 void *sc_plus; 275 size_t sc_pluslen; 276 277 u_int32_t sc_ureq_sent; 278 int sc_bulk_tries; 279#ifdef __FreeBSD__ 280 struct callout sc_bulkfail_tmo; 281#else 282 struct timeout sc_bulkfail_tmo; 283#endif 284 285 u_int32_t sc_ureq_received; 286 struct pf_state *sc_bulk_next; 287 struct pf_state *sc_bulk_last; 288#ifdef __FreeBSD__ 289 struct callout sc_bulk_tmo; 290#else 291 struct timeout sc_bulk_tmo; 292#endif 293 294 TAILQ_HEAD(, tdb) sc_tdb_q; 295 296#ifdef __FreeBSD__ 297 struct callout sc_tmo; 298#else 299 struct timeout sc_tmo; 300#endif 301#ifdef __FreeBSD__ 302 eventhandler_tag sc_detachtag; 303#endif 304 305}; 306 307#ifdef __FreeBSD__ 308static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL; 309#define V_pfsyncif VNET(pfsyncif) 310 311static VNET_DEFINE(struct pfsyncstats, pfsyncstats); 312#define V_pfsyncstats VNET(pfsyncstats) 313 314SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC"); 315SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW, 316 &VNET_NAME(pfsyncstats), pfsyncstats, 317 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)"); 318#else 319struct pfsync_softc *pfsyncif = NULL; 320struct pfsyncstats pfsyncstats; 321#define V_pfsyncstats pfsyncstats 322#endif 323 324#ifdef __FreeBSD__ 325static void pfsyncintr(void *); 326struct pfsync_swi { 327 void * pfsync_swi_cookie; 328}; 329static 
struct pfsync_swi pfsync_swi; 330#define schednetisr(p) swi_sched(pfsync_swi.pfsync_swi_cookie, 0) 331#define NETISR_PFSYNC 332#endif 333 334void pfsyncattach(int); 335#ifdef __FreeBSD__ 336int pfsync_clone_create(struct if_clone *, int, caddr_t); 337void pfsync_clone_destroy(struct ifnet *); 338#else 339int pfsync_clone_create(struct if_clone *, int); 340int pfsync_clone_destroy(struct ifnet *); 341#endif 342int pfsync_alloc_scrub_memory(struct pfsync_state_peer *, 343 struct pf_state_peer *); 344void pfsync_update_net_tdb(struct pfsync_tdb *); 345int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *, 346#ifdef __FreeBSD__ 347 struct route *); 348#else 349 struct rtentry *); 350#endif 351int pfsyncioctl(struct ifnet *, u_long, caddr_t); 352void pfsyncstart(struct ifnet *); 353 354struct mbuf *pfsync_if_dequeue(struct ifnet *); 355struct mbuf *pfsync_get_mbuf(struct pfsync_softc *); 356 357void pfsync_deferred(struct pf_state *, int); 358void pfsync_undefer(struct pfsync_deferral *, int); 359void pfsync_defer_tmo(void *); 360 361void pfsync_request_update(u_int32_t, u_int64_t); 362void pfsync_update_state_req(struct pf_state *); 363 364void pfsync_drop(struct pfsync_softc *); 365void pfsync_sendout(void); 366void pfsync_send_plus(void *, size_t); 367int pfsync_tdb_sendout(struct pfsync_softc *); 368int pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *); 369void pfsync_timeout(void *); 370void pfsync_tdb_timeout(void *); 371void pfsync_send_bus(struct pfsync_softc *, u_int8_t); 372 373void pfsync_bulk_start(void); 374void pfsync_bulk_status(u_int8_t); 375void pfsync_bulk_update(void *); 376void pfsync_bulk_fail(void *); 377 378#ifdef __FreeBSD__ 379void pfsync_ifdetach(void *, struct ifnet *); 380 381/* XXX: ugly */ 382#define betoh64 (unsigned long long)be64toh 383#define timeout_del callout_stop 384#endif 385 386#define PFSYNC_MAX_BULKTRIES 12 387#ifndef __FreeBSD__ 388int pfsync_sync_ok; 389#endif 390 391#ifdef __FreeBSD__ 
392IFC_SIMPLE_DECLARE(pfsync, 1); 393#else 394struct if_clone pfsync_cloner = 395 IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy); 396#endif 397 398void 399pfsyncattach(int npfsync) 400{ 401 if_clone_attach(&pfsync_cloner); 402} 403int 404#ifdef __FreeBSD__ 405pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param) 406#else 407pfsync_clone_create(struct if_clone *ifc, int unit) 408#endif 409{ 410 struct pfsync_softc *sc; 411 struct ifnet *ifp; 412 int q; 413 414 if (unit != 0) 415 return (EINVAL); 416 417#ifndef __FreeBSD__ 418 pfsync_sync_ok = 1; 419#endif 420 421 sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO); 422 if (sc == NULL) 423 return (ENOMEM); 424 425 for (q = 0; q < PFSYNC_S_COUNT; q++) 426 TAILQ_INIT(&sc->sc_qs[q]); 427 428#ifdef __FreeBSD__ 429 sc->pfsync_sync_ok = 1; 430 sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE, 431 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 432 if (sc->sc_pool == NULL) { 433 free(sc, M_DEVBUF); 434 return (ENOMEM); 435 } 436#else 437 pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL); 438#endif 439 TAILQ_INIT(&sc->sc_upd_req_list); 440 TAILQ_INIT(&sc->sc_deferrals); 441 sc->sc_deferred = 0; 442 443 TAILQ_INIT(&sc->sc_tdb_q); 444 445 sc->sc_len = PFSYNC_MINPKT; 446 sc->sc_maxupdates = 128; 447 448#ifdef __FreeBSD__ 449 sc->sc_imo.imo_membership = (struct in_multi **)malloc( 450 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF, 451 M_NOWAIT | M_ZERO); 452 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS; 453 sc->sc_imo.imo_multicast_vif = -1; 454#else 455 sc->sc_imo.imo_membership = (struct in_multi **)malloc( 456 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS, 457 M_WAITOK | M_ZERO); 458 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS; 459#endif 460 461#ifdef __FreeBSD__ 462 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC); 463 if (ifp == NULL) { 464 free(sc->sc_imo.imo_membership, M_DEVBUF); 465 uma_zdestroy(sc->sc_pool); 466 
free(sc, M_DEVBUF); 467 return (ENOSPC); 468 } 469 if_initname(ifp, ifc->ifc_name, unit); 470 471 sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event, 472#ifdef __FreeBSD__ 473 pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY); 474#else 475 pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY); 476#endif 477 if (sc->sc_detachtag == NULL) { 478 if_free(ifp); 479 free(sc->sc_imo.imo_membership, M_DEVBUF); 480 uma_zdestroy(sc->sc_pool); 481 free(sc, M_DEVBUF); 482 return (ENOSPC); 483 } 484#else 485 ifp = &sc->sc_if; 486 snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit); 487#endif 488 ifp->if_softc = sc; 489 ifp->if_ioctl = pfsyncioctl; 490 ifp->if_output = pfsyncoutput; 491 ifp->if_start = pfsyncstart; 492 ifp->if_type = IFT_PFSYNC; 493 ifp->if_snd.ifq_maxlen = ifqmaxlen; 494 ifp->if_hdrlen = sizeof(struct pfsync_header); 495 ifp->if_mtu = 1500; /* XXX */ 496#ifdef __FreeBSD__ 497 callout_init(&sc->sc_tmo, CALLOUT_MPSAFE); 498 callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0); 499 callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE); 500#else 501 ifp->if_hardmtu = MCLBYTES; /* XXX */ 502 timeout_set(&sc->sc_tmo, pfsync_timeout, sc); 503 timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc); 504 timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc); 505#endif 506 507 if_attach(ifp); 508#ifndef __FreeBSD__ 509 if_alloc_sadl(ifp); 510#endif 511 512#if NCARP > 0 513 if_addgroup(ifp, "carp"); 514#endif 515 516#if NBPFILTER > 0 517#ifdef __FreeBSD__ 518 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 519#else 520 bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 521#endif 522#endif 523 524#ifdef __FreeBSD__ 525 V_pfsyncif = sc; 526#else 527 pfsyncif = sc; 528#endif 529 530 return (0); 531} 532 533#ifdef __FreeBSD__ 534void 535#else 536int 537#endif 538pfsync_clone_destroy(struct ifnet *ifp) 539{ 540 struct pfsync_softc *sc = ifp->if_softc; 541 542#ifdef __FreeBSD__ 543 EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag); 544 
PF_LOCK(); 545#endif 546 timeout_del(&sc->sc_bulkfail_tmo); 547 timeout_del(&sc->sc_bulk_tmo); 548 timeout_del(&sc->sc_tmo); 549#ifdef __FreeBSD__ 550 PF_UNLOCK(); 551#endif 552#if NCARP > 0 553#ifdef notyet 554#ifdef __FreeBSD__ 555 if (!sc->pfsync_sync_ok) 556#else 557 if (!pfsync_sync_ok) 558#endif 559 carp_group_demote_adj(&sc->sc_if, -1); 560#endif 561#endif 562#if NBPFILTER > 0 563 bpfdetach(ifp); 564#endif 565 if_detach(ifp); 566 567 pfsync_drop(sc); 568 569 while (sc->sc_deferred > 0) 570 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0); 571 572#ifdef __FreeBSD__ 573 UMA_DESTROY(sc->sc_pool); 574#else 575 pool_destroy(&sc->sc_pool); 576#endif 577#ifdef __FreeBSD__ 578 if_free(ifp); 579 free(sc->sc_imo.imo_membership, M_DEVBUF); 580#else 581 free(sc->sc_imo.imo_membership, M_IPMOPTS); 582#endif 583 free(sc, M_DEVBUF); 584 585#ifdef __FreeBSD__ 586 V_pfsyncif = NULL; 587#else 588 pfsyncif = NULL; 589#endif 590 591#ifndef __FreeBSD__ 592 return (0); 593#endif 594} 595 596struct mbuf * 597pfsync_if_dequeue(struct ifnet *ifp) 598{ 599 struct mbuf *m; 600#ifndef __FreeBSD__ 601 int s; 602#endif 603 604#ifdef __FreeBSD__ 605 IF_LOCK(&ifp->if_snd); 606 _IF_DROP(&ifp->if_snd); 607 _IF_DEQUEUE(&ifp->if_snd, m); 608 IF_UNLOCK(&ifp->if_snd); 609#else 610 s = splnet(); 611 IF_DEQUEUE(&ifp->if_snd, m); 612 splx(s); 613#endif 614 615 return (m); 616} 617 618/* 619 * Start output on the pfsync interface. 
620 */ 621void 622pfsyncstart(struct ifnet *ifp) 623{ 624 struct mbuf *m; 625 626 while ((m = pfsync_if_dequeue(ifp)) != NULL) { 627#ifndef __FreeBSD__ 628 IF_DROP(&ifp->if_snd); 629#endif 630 m_freem(m); 631 } 632} 633 634int 635pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, 636 struct pf_state_peer *d) 637{ 638 if (s->scrub.scrub_flag && d->scrub == NULL) { 639#ifdef __FreeBSD__ 640 d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO); 641#else 642 d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO); 643#endif 644 if (d->scrub == NULL) 645 return (ENOMEM); 646 } 647 648 return (0); 649} 650 651#ifndef __FreeBSD__ 652void 653pfsync_state_export(struct pfsync_state *sp, struct pf_state *st) 654{ 655 bzero(sp, sizeof(struct pfsync_state)); 656 657 /* copy from state key */ 658 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 659 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 660 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 661 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 662 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 663 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1]; 664 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 665 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 666 sp->proto = st->key[PF_SK_WIRE]->proto; 667 sp->af = st->key[PF_SK_WIRE]->af; 668 669 /* copy from state */ 670 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 671 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 672 sp->creation = htonl(time_uptime - st->creation); 673 sp->expire = pf_state_expires(st); 674 if (sp->expire <= time_second) 675 sp->expire = htonl(0); 676 else 677 sp->expire = htonl(sp->expire - time_second); 678 679 sp->direction = st->direction; 680 sp->log = st->log; 681 sp->timeout = st->timeout; 682 sp->state_flags = st->state_flags; 683 if (st->src_node) 684 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 685 if 
(st->nat_src_node) 686 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 687 688 bcopy(&st->id, &sp->id, sizeof(sp->id)); 689 sp->creatorid = st->creatorid; 690 pf_state_peer_hton(&st->src, &sp->src); 691 pf_state_peer_hton(&st->dst, &sp->dst); 692 693 if (st->rule.ptr == NULL) 694 sp->rule = htonl(-1); 695 else 696 sp->rule = htonl(st->rule.ptr->nr); 697 if (st->anchor.ptr == NULL) 698 sp->anchor = htonl(-1); 699 else 700 sp->anchor = htonl(st->anchor.ptr->nr); 701 if (st->nat_rule.ptr == NULL) 702 sp->nat_rule = htonl(-1); 703 else 704 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 705 706 pf_state_counter_hton(st->packets[0], sp->packets[0]); 707 pf_state_counter_hton(st->packets[1], sp->packets[1]); 708 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 709 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 710 711} 712#endif 713 714int 715pfsync_state_import(struct pfsync_state *sp, u_int8_t flags) 716{ 717 struct pf_state *st = NULL; 718 struct pf_state_key *skw = NULL, *sks = NULL; 719 struct pf_rule *r = NULL; 720 struct pfi_kif *kif; 721 int pool_flags; 722 int error; 723 724 PF_LOCK_ASSERT(); 725 726#ifdef __FreeBSD__ 727 if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) { 728#else 729 if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) { 730#endif 731 printf("pfsync_state_import: invalid creator id:" 732 " %08x\n", ntohl(sp->creatorid)); 733 return (EINVAL); 734 } 735 736 if ((kif = pfi_kif_get(sp->ifname)) == NULL) { 737#ifdef __FreeBSD__ 738 if (V_pf_status.debug >= PF_DEBUG_MISC) 739#else 740 if (pf_status.debug >= PF_DEBUG_MISC) 741#endif 742 printf("pfsync_state_import: " 743 "unknown interface: %s\n", sp->ifname); 744 if (flags & PFSYNC_SI_IOCTL) 745 return (EINVAL); 746 return (0); /* skip this state */ 747 } 748 749 /* 750 * If the ruleset checksums match or the state is coming from the ioctl, 751 * it's safe to associate the state with the rule of that number. 
752 */ 753 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && 754 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) < 755 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) 756 r = pf_main_ruleset.rules[ 757 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)]; 758 else 759#ifdef __FreeBSD__ 760 r = &V_pf_default_rule; 761#else 762 r = &pf_default_rule; 763#endif 764 765 if ((r->max_states && r->states_cur >= r->max_states)) 766 goto cleanup; 767 768#ifdef __FreeBSD__ 769 if (flags & PFSYNC_SI_IOCTL) 770 pool_flags = PR_WAITOK | PR_ZERO; 771 else 772 pool_flags = PR_NOWAIT | PR_ZERO; 773 774 if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL) 775 goto cleanup; 776#else 777 if (flags & PFSYNC_SI_IOCTL) 778 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO; 779 else 780 pool_flags = PR_LIMITFAIL | PR_ZERO; 781 782 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL) 783 goto cleanup; 784#endif 785 786 if ((skw = pf_alloc_state_key(pool_flags)) == NULL) 787 goto cleanup; 788 789 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0], 790 &sp->key[PF_SK_STACK].addr[0], sp->af) || 791 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1], 792 &sp->key[PF_SK_STACK].addr[1], sp->af) || 793 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] || 794 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) { 795 if ((sks = pf_alloc_state_key(pool_flags)) == NULL) 796 goto cleanup; 797 } else 798 sks = skw; 799 800 /* allocate memory for scrub info */ 801 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) || 802 pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) 803 goto cleanup; 804 805 /* copy to state key(s) */ 806 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0]; 807 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1]; 808 skw->port[0] = sp->key[PF_SK_WIRE].port[0]; 809 skw->port[1] = sp->key[PF_SK_WIRE].port[1]; 810 skw->proto = sp->proto; 811 skw->af = sp->af; 812 if (sks != skw) { 813 sks->addr[0] = sp->key[PF_SK_STACK].addr[0]; 814 sks->addr[1] = sp->key[PF_SK_STACK].addr[1]; 
815 sks->port[0] = sp->key[PF_SK_STACK].port[0]; 816 sks->port[1] = sp->key[PF_SK_STACK].port[1]; 817 sks->proto = sp->proto; 818 sks->af = sp->af; 819 } 820 821 /* copy to state */ 822 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr)); 823 st->creation = time_uptime - ntohl(sp->creation); 824 st->expire = time_second; 825 if (sp->expire) { 826 /* XXX No adaptive scaling. */ 827 st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire); 828 } 829 830 st->expire = ntohl(sp->expire) + time_second; 831 st->direction = sp->direction; 832 st->log = sp->log; 833 st->timeout = sp->timeout; 834 st->state_flags = sp->state_flags; 835 836 bcopy(sp->id, &st->id, sizeof(st->id)); 837 st->creatorid = sp->creatorid; 838 pf_state_peer_ntoh(&sp->src, &st->src); 839 pf_state_peer_ntoh(&sp->dst, &st->dst); 840 841 st->rule.ptr = r; 842 st->nat_rule.ptr = NULL; 843 st->anchor.ptr = NULL; 844 st->rt_kif = NULL; 845 846 st->pfsync_time = time_uptime; 847 st->sync_state = PFSYNC_S_NONE; 848 849 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ 850 r->states_cur++; 851 r->states_tot++; 852 853 if (!ISSET(flags, PFSYNC_SI_IOCTL)) 854 SET(st->state_flags, PFSTATE_NOSYNC); 855 856 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) { 857 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */ 858 r->states_cur--; 859 goto cleanup_state; 860 } 861 862 if (!ISSET(flags, PFSYNC_SI_IOCTL)) { 863 CLR(st->state_flags, PFSTATE_NOSYNC); 864 if (ISSET(st->state_flags, PFSTATE_ACK)) { 865 pfsync_q_ins(st, PFSYNC_S_IACK); 866#ifdef __FreeBSD__ 867 pfsync_sendout(); 868#else 869 schednetisr(NETISR_PFSYNC); 870#endif 871 } 872 } 873 CLR(st->state_flags, PFSTATE_ACK); 874 875 return (0); 876 877cleanup: 878 error = ENOMEM; 879 if (skw == sks) 880 sks = NULL; 881#ifdef __FreeBSD__ 882 if (skw != NULL) 883 pool_put(&V_pf_state_key_pl, skw); 884 if (sks != NULL) 885 pool_put(&V_pf_state_key_pl, sks); 886#else 887 if (skw != NULL) 888 pool_put(&pf_state_key_pl, skw); 889 if (sks 
!= NULL) 890 pool_put(&pf_state_key_pl, sks); 891#endif 892 893cleanup_state: /* pf_state_insert frees the state keys */ 894 if (st) { 895#ifdef __FreeBSD__ 896 if (st->dst.scrub) 897 pool_put(&V_pf_state_scrub_pl, st->dst.scrub); 898 if (st->src.scrub) 899 pool_put(&V_pf_state_scrub_pl, st->src.scrub); 900 pool_put(&V_pf_state_pl, st); 901#else 902 if (st->dst.scrub) 903 pool_put(&pf_state_scrub_pl, st->dst.scrub); 904 if (st->src.scrub) 905 pool_put(&pf_state_scrub_pl, st->src.scrub); 906 pool_put(&pf_state_pl, st); 907#endif 908 } 909 return (error); 910} 911 912void 913#ifdef __FreeBSD__ 914pfsync_input(struct mbuf *m, __unused int off) 915#else 916pfsync_input(struct mbuf *m, ...) 917#endif 918{ 919#ifdef __FreeBSD__ 920 struct pfsync_softc *sc = V_pfsyncif; 921#else 922 struct pfsync_softc *sc = pfsyncif; 923#endif 924 struct pfsync_pkt pkt; 925 struct ip *ip = mtod(m, struct ip *); 926 struct pfsync_header *ph; 927 struct pfsync_subheader subh; 928 929 int offset; 930 int rv; 931 932 V_pfsyncstats.pfsyncs_ipackets++; 933 934 /* verify that we have a sync interface configured */ 935#ifdef __FreeBSD__ 936 if (!sc || !sc->sc_sync_if || !V_pf_status.running) 937#else 938 if (!sc || !sc->sc_sync_if || !pf_status.running) 939#endif 940 goto done; 941 942 /* verify that the packet came in on the right interface */ 943 if (sc->sc_sync_if != m->m_pkthdr.rcvif) { 944 V_pfsyncstats.pfsyncs_badif++; 945 goto done; 946 } 947 948#ifdef __FreeBSD__ 949 sc->sc_ifp->if_ipackets++; 950 sc->sc_ifp->if_ibytes += m->m_pkthdr.len; 951#else 952 sc->sc_if.if_ipackets++; 953 sc->sc_if.if_ibytes += m->m_pkthdr.len; 954#endif 955 /* verify that the IP TTL is 255. 
*/ 956 if (ip->ip_ttl != PFSYNC_DFLTTL) { 957 V_pfsyncstats.pfsyncs_badttl++; 958 goto done; 959 } 960 961 offset = ip->ip_hl << 2; 962 if (m->m_pkthdr.len < offset + sizeof(*ph)) { 963 V_pfsyncstats.pfsyncs_hdrops++; 964 goto done; 965 } 966 967 if (offset + sizeof(*ph) > m->m_len) { 968 if (m_pullup(m, offset + sizeof(*ph)) == NULL) { 969 V_pfsyncstats.pfsyncs_hdrops++; 970 return; 971 } 972 ip = mtod(m, struct ip *); 973 } 974 ph = (struct pfsync_header *)((char *)ip + offset); 975 976 /* verify the version */ 977 if (ph->version != PFSYNC_VERSION) { 978 V_pfsyncstats.pfsyncs_badver++; 979 goto done; 980 } 981 982#if 0 983 if (pfsync_input_hmac(m, offset) != 0) { 984 /* XXX stats */ 985 goto done; 986 } 987#endif 988 989 /* Cheaper to grab this now than having to mess with mbufs later */ 990 pkt.ip = ip; 991 pkt.src = ip->ip_src; 992 pkt.flags = 0; 993 994#ifdef __FreeBSD__ 995 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 996#else 997 if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 998#endif 999 pkt.flags |= PFSYNC_SI_CKSUM; 1000 1001 offset += sizeof(*ph); 1002 for (;;) { 1003 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); 1004 offset += sizeof(subh); 1005 1006 if (subh.action >= PFSYNC_ACT_MAX) { 1007 V_pfsyncstats.pfsyncs_badact++; 1008 goto done; 1009 } 1010 1011 rv = (*pfsync_acts[subh.action])(&pkt, m, offset, 1012 ntohs(subh.count)); 1013 if (rv == -1) 1014 return; 1015 1016 offset += rv; 1017 } 1018 1019done: 1020 m_freem(m); 1021} 1022 1023int 1024pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1025{ 1026 struct pfsync_clr *clr; 1027 struct mbuf *mp; 1028 int len = sizeof(*clr) * count; 1029 int i, offp; 1030 1031 struct pf_state *st, *nexts; 1032 struct pf_state_key *sk, *nextsk; 1033 struct pf_state_item *si; 1034 u_int32_t creatorid; 1035 int s; 1036 1037 mp = m_pulldown(m, offset, len, &offp); 1038 if (mp == NULL) { 1039 V_pfsyncstats.pfsyncs_badlen++; 1040 return 
(-1); 1041 } 1042 clr = (struct pfsync_clr *)(mp->m_data + offp); 1043 1044 s = splsoftnet(); 1045#ifdef __FreeBSD__ 1046 PF_LOCK(); 1047#endif 1048 for (i = 0; i < count; i++) { 1049 creatorid = clr[i].creatorid; 1050 1051 if (clr[i].ifname[0] == '\0') { 1052#ifdef __FreeBSD__ 1053 for (st = RB_MIN(pf_state_tree_id, &V_tree_id); 1054 st; st = nexts) { 1055 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st); 1056#else 1057 for (st = RB_MIN(pf_state_tree_id, &tree_id); 1058 st; st = nexts) { 1059 nexts = RB_NEXT(pf_state_tree_id, &tree_id, st); 1060#endif 1061 if (st->creatorid == creatorid) { 1062 SET(st->state_flags, PFSTATE_NOSYNC); 1063 pf_unlink_state(st); 1064 } 1065 } 1066 } else { 1067 if (pfi_kif_get(clr[i].ifname) == NULL) 1068 continue; 1069 1070 /* XXX correct? */ 1071#ifdef __FreeBSD__ 1072 for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl); 1073#else 1074 for (sk = RB_MIN(pf_state_tree, &pf_statetbl); 1075#endif 1076 sk; sk = nextsk) { 1077 nextsk = RB_NEXT(pf_state_tree, 1078#ifdef __FreeBSD__ 1079 &V_pf_statetbl, sk); 1080#else 1081 &pf_statetbl, sk); 1082#endif 1083 TAILQ_FOREACH(si, &sk->states, entry) { 1084 if (si->s->creatorid == creatorid) { 1085 SET(si->s->state_flags, 1086 PFSTATE_NOSYNC); 1087 pf_unlink_state(si->s); 1088 } 1089 } 1090 } 1091 } 1092 } 1093#ifdef __FreeBSD__ 1094 PF_UNLOCK(); 1095#endif 1096 splx(s); 1097 1098 return (len); 1099} 1100 1101int 1102pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1103{ 1104 struct mbuf *mp; 1105 struct pfsync_state *sa, *sp; 1106 int len = sizeof(*sp) * count; 1107 int i, offp; 1108 1109 int s; 1110 1111 mp = m_pulldown(m, offset, len, &offp); 1112 if (mp == NULL) { 1113 V_pfsyncstats.pfsyncs_badlen++; 1114 return (-1); 1115 } 1116 sa = (struct pfsync_state *)(mp->m_data + offp); 1117 1118 s = splsoftnet(); 1119#ifdef __FreeBSD__ 1120 PF_LOCK(); 1121#endif 1122 for (i = 0; i < count; i++) { 1123 sp = &sa[i]; 1124 1125 /* check for invalid values */ 1126 if (sp->timeout 
>= PFTM_MAX || 1127 sp->src.state > PF_TCPS_PROXY_DST || 1128 sp->dst.state > PF_TCPS_PROXY_DST || 1129 sp->direction > PF_OUT || 1130 (sp->af != AF_INET && sp->af != AF_INET6)) { 1131#ifdef __FreeBSD__ 1132 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1133#else 1134 if (pf_status.debug >= PF_DEBUG_MISC) { 1135#endif 1136 printf("pfsync_input: PFSYNC5_ACT_INS: " 1137 "invalid value\n"); 1138 } 1139 V_pfsyncstats.pfsyncs_badval++; 1140 continue; 1141 } 1142 1143 if (pfsync_state_import(sp, pkt->flags) == ENOMEM) { 1144 /* drop out, but process the rest of the actions */ 1145 break; 1146 } 1147 } 1148#ifdef __FreeBSD__ 1149 PF_UNLOCK(); 1150#endif 1151 splx(s); 1152 1153 return (len); 1154} 1155 1156int 1157pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1158{ 1159 struct pfsync_ins_ack *ia, *iaa; 1160 struct pf_state_cmp id_key; 1161 struct pf_state *st; 1162 1163 struct mbuf *mp; 1164 int len = count * sizeof(*ia); 1165 int offp, i; 1166 int s; 1167 1168 mp = m_pulldown(m, offset, len, &offp); 1169 if (mp == NULL) { 1170 V_pfsyncstats.pfsyncs_badlen++; 1171 return (-1); 1172 } 1173 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); 1174 1175 s = splsoftnet(); 1176#ifdef __FreeBSD__ 1177 PF_LOCK(); 1178#endif 1179 for (i = 0; i < count; i++) { 1180 ia = &iaa[i]; 1181 1182 bcopy(&ia->id, &id_key.id, sizeof(id_key.id)); 1183 id_key.creatorid = ia->creatorid; 1184 1185 st = pf_find_state_byid(&id_key); 1186 if (st == NULL) 1187 continue; 1188 1189 if (ISSET(st->state_flags, PFSTATE_ACK)) 1190 pfsync_deferred(st, 0); 1191 } 1192#ifdef __FreeBSD__ 1193 PF_UNLOCK(); 1194#endif 1195 splx(s); 1196 /* 1197 * XXX this is not yet implemented, but we know the size of the 1198 * message so we can skip it. 
	 */

	return (count * sizeof(struct pfsync_ins_ack));
}

/*
 * Sanity-check an incoming TCP peer snapshot (src/dst) against our
 * local copy of the state.
 *
 * Returns 0 when the update may be applied, or a nonzero "sfail"
 * reason code when the peer's snapshot is stale, i.e. it would move
 * the connection state or the sequence window backwards.  The
 * sfail == 7 case is special: the src half of the update is imported
 * before "failing", so the caller still announces our newer state
 * back to the peer that is behind.
 */
int
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
	int sfail = 0;

	/*
	 * The state should never go backwards except
	 * for syn-proxy states.  Neither should the
	 * sequence window slide backwards.
	 */
	if (st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC))
		sfail = 1;
	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
		sfail = 3;
	else if (st->dst.state > dst->state) {
		/* There might still be useful
		 * information about the src state here,
		 * so import that part of the update,
		 * then "fail" so we send the updated
		 * state back to the peer who is missing
		 * what we know. */
		pf_state_peer_ntoh(src, &st->src);
		/* XXX do anything with timeouts? */
		sfail = 7;
	} else if (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
		sfail = 4;

	return (sfail);
}

/*
 * Handle a batch of full state updates (PFSYNC_ACT_UPD): validate
 * each entry, insert states we do not know about, and merge updates
 * into existing states unless the peer's copy is stale.
 */
int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int sfail;

	struct mbuf *mp;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	/* Make the whole batch contiguous before indexing into it. */
	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1272#else 1273 if (pf_status.debug >= PF_DEBUG_MISC) { 1274#endif 1275 printf("pfsync_input: PFSYNC_ACT_UPD: " 1276 "invalid value\n"); 1277 } 1278 V_pfsyncstats.pfsyncs_badval++; 1279 continue; 1280 } 1281 1282 bcopy(sp->id, &id_key.id, sizeof(id_key.id)); 1283 id_key.creatorid = sp->creatorid; 1284 1285 st = pf_find_state_byid(&id_key); 1286 if (st == NULL) { 1287 /* insert the update */ 1288 if (pfsync_state_import(sp, 0)) 1289 V_pfsyncstats.pfsyncs_badstate++; 1290 continue; 1291 } 1292 1293 if (ISSET(st->state_flags, PFSTATE_ACK)) 1294 pfsync_deferred(st, 1); 1295 1296 sk = st->key[PF_SK_WIRE]; /* XXX right one? */ 1297 sfail = 0; 1298 if (sk->proto == IPPROTO_TCP) 1299 sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst); 1300 else { 1301 /* 1302 * Non-TCP protocol state machine always go 1303 * forwards 1304 */ 1305 if (st->src.state > sp->src.state) 1306 sfail = 5; 1307 else if (st->dst.state > sp->dst.state) 1308 sfail = 6; 1309 } 1310 1311 if (sfail) { 1312#ifdef __FreeBSD__ 1313 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1314#else 1315 if (pf_status.debug >= PF_DEBUG_MISC) { 1316#endif 1317 printf("pfsync: %s stale update (%d)" 1318 " id: %016llx creatorid: %08x\n", 1319 (sfail < 7 ? 
"ignoring" : "partial"), 1320 sfail, betoh64(st->id), 1321 ntohl(st->creatorid)); 1322 } 1323 V_pfsyncstats.pfsyncs_stale++; 1324 1325 pfsync_update_state(st); 1326#ifdef __FreeBSD__ 1327 pfsync_sendout(); 1328#else 1329 schednetisr(NETISR_PFSYNC); 1330#endif 1331 continue; 1332 } 1333 pfsync_alloc_scrub_memory(&sp->dst, &st->dst); 1334 pf_state_peer_ntoh(&sp->src, &st->src); 1335 pf_state_peer_ntoh(&sp->dst, &st->dst); 1336 st->expire = ntohl(sp->expire) + time_second; 1337 st->timeout = sp->timeout; 1338 st->pfsync_time = time_uptime; 1339 } 1340#ifdef __FreeBSD__ 1341 PF_UNLOCK(); 1342#endif 1343 splx(s); 1344 1345 return (len); 1346} 1347 1348int 1349pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1350{ 1351 struct pfsync_upd_c *ua, *up; 1352 struct pf_state_key *sk; 1353 struct pf_state_cmp id_key; 1354 struct pf_state *st; 1355 1356 int len = count * sizeof(*up); 1357 int sfail; 1358 1359 struct mbuf *mp; 1360 int offp, i; 1361 int s; 1362 1363 mp = m_pulldown(m, offset, len, &offp); 1364 if (mp == NULL) { 1365 V_pfsyncstats.pfsyncs_badlen++; 1366 return (-1); 1367 } 1368 ua = (struct pfsync_upd_c *)(mp->m_data + offp); 1369 1370 s = splsoftnet(); 1371#ifdef __FreeBSD__ 1372 PF_LOCK(); 1373#endif 1374 for (i = 0; i < count; i++) { 1375 up = &ua[i]; 1376 1377 /* check for invalid values */ 1378 if (up->timeout >= PFTM_MAX || 1379 up->src.state > PF_TCPS_PROXY_DST || 1380 up->dst.state > PF_TCPS_PROXY_DST) { 1381#ifdef __FreeBSD__ 1382 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1383#else 1384 if (pf_status.debug >= PF_DEBUG_MISC) { 1385#endif 1386 printf("pfsync_input: " 1387 "PFSYNC_ACT_UPD_C: " 1388 "invalid value\n"); 1389 } 1390 V_pfsyncstats.pfsyncs_badval++; 1391 continue; 1392 } 1393 1394 bcopy(&up->id, &id_key.id, sizeof(id_key.id)); 1395 id_key.creatorid = up->creatorid; 1396 1397 st = pf_find_state_byid(&id_key); 1398 if (st == NULL) { 1399 /* We don't have this state. Ask for it. 
*/ 1400 pfsync_request_update(id_key.creatorid, id_key.id); 1401 continue; 1402 } 1403 1404 if (ISSET(st->state_flags, PFSTATE_ACK)) 1405 pfsync_deferred(st, 1); 1406 1407 sk = st->key[PF_SK_WIRE]; /* XXX right one? */ 1408 sfail = 0; 1409 if (sk->proto == IPPROTO_TCP) 1410 sfail = pfsync_upd_tcp(st, &up->src, &up->dst); 1411 else { 1412 /* 1413 * Non-TCP protocol state machine always go forwards 1414 */ 1415 if (st->src.state > up->src.state) 1416 sfail = 5; 1417 else if (st->dst.state > up->dst.state) 1418 sfail = 6; 1419 } 1420 1421 if (sfail) { 1422#ifdef __FreeBSD__ 1423 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1424#else 1425 if (pf_status.debug >= PF_DEBUG_MISC) { 1426#endif 1427 printf("pfsync: ignoring stale update " 1428 "(%d) id: %016llx " 1429 "creatorid: %08x\n", sfail, 1430 betoh64(st->id), 1431 ntohl(st->creatorid)); 1432 } 1433 V_pfsyncstats.pfsyncs_stale++; 1434 1435 pfsync_update_state(st); 1436#ifdef __FreeBSD__ 1437 pfsync_sendout(); 1438#else 1439 schednetisr(NETISR_PFSYNC); 1440#endif 1441 continue; 1442 } 1443 pfsync_alloc_scrub_memory(&up->dst, &st->dst); 1444 pf_state_peer_ntoh(&up->src, &st->src); 1445 pf_state_peer_ntoh(&up->dst, &st->dst); 1446 st->expire = ntohl(up->expire) + time_second; 1447 st->timeout = up->timeout; 1448 st->pfsync_time = time_uptime; 1449 } 1450#ifdef __FreeBSD__ 1451 PF_UNLOCK(); 1452#endif 1453 splx(s); 1454 1455 return (len); 1456} 1457 1458int 1459pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1460{ 1461 struct pfsync_upd_req *ur, *ura; 1462 struct mbuf *mp; 1463 int len = count * sizeof(*ur); 1464 int i, offp; 1465 1466 struct pf_state_cmp id_key; 1467 struct pf_state *st; 1468 1469 mp = m_pulldown(m, offset, len, &offp); 1470 if (mp == NULL) { 1471 V_pfsyncstats.pfsyncs_badlen++; 1472 return (-1); 1473 } 1474 ura = (struct pfsync_upd_req *)(mp->m_data + offp); 1475 1476 for (i = 0; i < count; i++) { 1477 ur = &ura[i]; 1478 1479 bcopy(&ur->id, &id_key.id, sizeof(id_key.id)); 1480 
		id_key.creatorid = ur->creatorid;

		/* An all-zero request is the signal to start a bulk send. */
		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			if (ISSET(st->state_flags, PFSTATE_NOSYNC))
				continue;

			PF_LOCK();
			pfsync_update_state_req(st);
			PF_UNLOCK();
		}
	}

	return (len);
}

/*
 * Handle a batch of full state deletions (PFSYNC_ACT_DEL): look up
 * each state by id/creatorid and unlink it.  PFSTATE_NOSYNC is set
 * before pf_unlink_state() so that tearing the state down does not
 * generate a delete message back towards the peers (states flagged
 * NOSYNC are skipped by the sync queueing paths).
 */
int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * Handle a batch of compressed deletions (PFSYNC_ACT_DEL_C).  Same
 * logic as pfsync_in_del(), but the wire format carries only the
 * id/creatorid pair instead of a full pfsync_state.
 */
int
pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_del_c *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i <
count; i++) { 1569 sp = &sa[i]; 1570 1571 bcopy(&sp->id, &id_key.id, sizeof(id_key.id)); 1572 id_key.creatorid = sp->creatorid; 1573 1574 st = pf_find_state_byid(&id_key); 1575 if (st == NULL) { 1576 V_pfsyncstats.pfsyncs_badstate++; 1577 continue; 1578 } 1579 1580 SET(st->state_flags, PFSTATE_NOSYNC); 1581 pf_unlink_state(st); 1582 } 1583#ifdef __FreeBSD__ 1584 PF_UNLOCK(); 1585#endif 1586 splx(s); 1587 1588 return (len); 1589} 1590 1591int 1592pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1593{ 1594#ifdef __FreeBSD__ 1595 struct pfsync_softc *sc = V_pfsyncif; 1596#else 1597 struct pfsync_softc *sc = pfsyncif; 1598#endif 1599 struct pfsync_bus *bus; 1600 struct mbuf *mp; 1601 int len = count * sizeof(*bus); 1602 int offp; 1603 1604 /* If we're not waiting for a bulk update, who cares. */ 1605 if (sc->sc_ureq_sent == 0) 1606 return (len); 1607 1608 mp = m_pulldown(m, offset, len, &offp); 1609 if (mp == NULL) { 1610 V_pfsyncstats.pfsyncs_badlen++; 1611 return (-1); 1612 } 1613 bus = (struct pfsync_bus *)(mp->m_data + offp); 1614 1615 switch (bus->status) { 1616 case PFSYNC_BUS_START: 1617#ifdef __FreeBSD__ 1618 callout_reset(&sc->sc_bulkfail_tmo, 4 * hz + 1619 V_pf_pool_limits[PF_LIMIT_STATES].limit / 1620 ((sc->sc_sync_if->if_mtu - PFSYNC_MINPKT) / 1621 sizeof(struct pfsync_state)), 1622 pfsync_bulk_fail, V_pfsyncif); 1623#else 1624 timeout_add(&sc->sc_bulkfail_tmo, 4 * hz + 1625 pf_pool_limits[PF_LIMIT_STATES].limit / 1626 ((sc->sc_if.if_mtu - PFSYNC_MINPKT) / 1627 sizeof(struct pfsync_state))); 1628#endif 1629#ifdef __FreeBSD__ 1630 if (V_pf_status.debug >= PF_DEBUG_MISC) 1631#else 1632 if (pf_status.debug >= PF_DEBUG_MISC) 1633#endif 1634 printf("pfsync: received bulk update start\n"); 1635 break; 1636 1637 case PFSYNC_BUS_END: 1638 if (time_uptime - ntohl(bus->endtime) >= 1639 sc->sc_ureq_sent) { 1640 /* that's it, we're happy */ 1641 sc->sc_ureq_sent = 0; 1642 sc->sc_bulk_tries = 0; 1643 timeout_del(&sc->sc_bulkfail_tmo); 1644#if 
NCARP > 0 1645#ifdef notyet 1646#ifdef __FreeBSD__ 1647 if (!sc->pfsync_sync_ok) 1648#else 1649 if (!pfsync_sync_ok) 1650#endif 1651 carp_group_demote_adj(&sc->sc_if, -1); 1652#endif 1653#endif 1654#ifdef __FreeBSD__ 1655 sc->pfsync_sync_ok = 1; 1656#else 1657 pfsync_sync_ok = 1; 1658#endif 1659#ifdef __FreeBSD__ 1660 if (V_pf_status.debug >= PF_DEBUG_MISC) 1661#else 1662 if (pf_status.debug >= PF_DEBUG_MISC) 1663#endif 1664 printf("pfsync: received valid " 1665 "bulk update end\n"); 1666 } else { 1667#ifdef __FreeBSD__ 1668 if (V_pf_status.debug >= PF_DEBUG_MISC) 1669#else 1670 if (pf_status.debug >= PF_DEBUG_MISC) 1671#endif 1672 printf("pfsync: received invalid " 1673 "bulk update end: bad timestamp\n"); 1674 } 1675 break; 1676 } 1677 1678 return (len); 1679} 1680 1681int 1682pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1683{ 1684 int len = count * sizeof(struct pfsync_tdb); 1685 1686#if defined(IPSEC) 1687 struct pfsync_tdb *tp; 1688 struct mbuf *mp; 1689 int offp; 1690 int i; 1691 int s; 1692 1693 mp = m_pulldown(m, offset, len, &offp); 1694 if (mp == NULL) { 1695 V_pfsyncstats.pfsyncs_badlen++; 1696 return (-1); 1697 } 1698 tp = (struct pfsync_tdb *)(mp->m_data + offp); 1699 1700 s = splsoftnet(); 1701#ifdef __FreeBSD__ 1702 PF_LOCK(); 1703#endif 1704 for (i = 0; i < count; i++) 1705 pfsync_update_net_tdb(&tp[i]); 1706#ifdef __FreeBSD__ 1707 PF_UNLOCK(); 1708#endif 1709 splx(s); 1710#endif 1711 1712 return (len); 1713} 1714 1715#if defined(IPSEC) 1716/* Update an in-kernel tdb. Silently fail if no tdb is found. 
 */
void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;
	int s;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	s = spltdb();
	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		/* Convert the wire counters to host order in place. */
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			splx(s);
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	splx(s);
	return;

bad:
#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
#endif


/*
 * PFSYNC_ACT_EOF: the end-of-frame marker.  Verify it sits at the very
 * end of the packet, then free the mbuf and return -1; the caller must
 * not touch m after this (presumably -1 also ends the action dispatch
 * loop -- the caller is not visible from here).
 */
int
pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
		V_pfsyncstats.pfsyncs_badact++;

	/* we're done.
free and let the caller return */
	m_freem(m);
	return (-1);
}

/*
 * Handler for an unknown/invalid action: count it and drop the whole
 * packet.  Like pfsync_in_eof(), frees m and returns -1.
 */
int
pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	V_pfsyncstats.pfsyncs_badact++;

	m_freem(m);
	return (-1);
}

/*
 * if_output for the pfsync pseudo-interface.  pfsync never transmits
 * user packets, so anything handed to us is silently dropped.
 */
int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
#ifdef __FreeBSD__
	struct route *rt)
#else
	struct rtentry *rt)
#endif
{
	m_freem(m);
	return (0);
}

/*
 * ioctl handler for the pfsync interface: interface flags and MTU,
 * plus SIOCGETPFSYNC/SIOCSETPFSYNC for the syncdev/syncpeer/maxupdates
 * configuration.
 */
/* ARGSUSED */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#ifndef __FreeBSD__
	struct proc *p = curproc;
#endif
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	struct ip *ip;
	int s, error;

	switch (cmd) {
#if 0
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
#endif
	case SIOCSIFFLAGS:
#ifdef __FreeBSD__
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
		else
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#else
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu <= PFSYNC_MINPKT)
			return (EINVAL);
		if (ifr->ifr_mtu > MCLBYTES) /* XXX could be bigger */
			ifr->ifr_mtu = MCLBYTES;
		/* A smaller MTU may not fit what is already queued:
		 * flush the pending frame before shrinking. */
		if (ifr->ifr_mtu < ifp->if_mtu) {
			s = splnet();
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			pfsync_sendout();
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			splx(s);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates; 1855 return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr))); 1856 1857 case SIOCSETPFSYNC: 1858#ifdef __FreeBSD__ 1859 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0) 1860#else 1861 if ((error = suser(p, p->p_acflag)) != 0) 1862#endif 1863 return (error); 1864 if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr)))) 1865 return (error); 1866 1867#ifdef __FreeBSD__ 1868 PF_LOCK(); 1869#endif 1870 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0) 1871#ifdef __FreeBSD__ 1872 sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP); 1873#else 1874 sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP; 1875#endif 1876 else 1877 sc->sc_sync_peer.s_addr = 1878 pfsyncr.pfsyncr_syncpeer.s_addr; 1879 1880 if (pfsyncr.pfsyncr_maxupdates > 255) 1881#ifdef __FreeBSD__ 1882 { 1883 PF_UNLOCK(); 1884#endif 1885 return (EINVAL); 1886#ifdef __FreeBSD__ 1887 } 1888#endif 1889 sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates; 1890 1891 if (pfsyncr.pfsyncr_syncdev[0] == 0) { 1892 sc->sc_sync_if = NULL; 1893#ifdef __FreeBSD__ 1894 PF_UNLOCK(); 1895#endif 1896 if (imo->imo_num_memberships > 0) { 1897 in_delmulti(imo->imo_membership[ 1898 --imo->imo_num_memberships]); 1899 imo->imo_multicast_ifp = NULL; 1900 } 1901 break; 1902 } 1903 1904#ifdef __FreeBSD__ 1905 PF_UNLOCK(); 1906#endif 1907 if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL) 1908 return (EINVAL); 1909 1910#ifdef __FreeBSD__ 1911 PF_LOCK(); 1912#endif 1913 s = splnet(); 1914#ifdef __FreeBSD__ 1915 if (sifp->if_mtu < sc->sc_ifp->if_mtu || 1916#else 1917 if (sifp->if_mtu < sc->sc_if.if_mtu || 1918#endif 1919 (sc->sc_sync_if != NULL && 1920 sifp->if_mtu < sc->sc_sync_if->if_mtu) || 1921 sifp->if_mtu < MCLBYTES - sizeof(struct ip)) 1922 pfsync_sendout(); 1923 sc->sc_sync_if = sifp; 1924 1925 if (imo->imo_num_memberships > 0) { 1926#ifdef __FreeBSD__ 1927 PF_UNLOCK(); 1928#endif 1929 in_delmulti(imo->imo_membership[--imo->imo_num_memberships]); 1930#ifdef __FreeBSD__ 1931 
PF_LOCK(); 1932#endif 1933 imo->imo_multicast_ifp = NULL; 1934 } 1935 1936 if (sc->sc_sync_if && 1937#ifdef __FreeBSD__ 1938 sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) { 1939#else 1940 sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) { 1941#endif 1942 struct in_addr addr; 1943 1944 if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) { 1945 sc->sc_sync_if = NULL; 1946#ifdef __FreeBSD__ 1947 PF_UNLOCK(); 1948#endif 1949 splx(s); 1950 return (EADDRNOTAVAIL); 1951 } 1952 1953#ifdef __FreeBSD__ 1954 addr.s_addr = htonl(INADDR_PFSYNC_GROUP); 1955#else 1956 addr.s_addr = INADDR_PFSYNC_GROUP; 1957#endif 1958 1959#ifdef __FreeBSD__ 1960 PF_UNLOCK(); 1961#endif 1962 if ((imo->imo_membership[0] = 1963 in_addmulti(&addr, sc->sc_sync_if)) == NULL) { 1964 sc->sc_sync_if = NULL; 1965 splx(s); 1966 return (ENOBUFS); 1967 } 1968#ifdef __FreeBSD__ 1969 PF_LOCK(); 1970#endif 1971 imo->imo_num_memberships++; 1972 imo->imo_multicast_ifp = sc->sc_sync_if; 1973 imo->imo_multicast_ttl = PFSYNC_DFLTTL; 1974 imo->imo_multicast_loop = 0; 1975 } 1976 1977 ip = &sc->sc_template; 1978 bzero(ip, sizeof(*ip)); 1979 ip->ip_v = IPVERSION; 1980 ip->ip_hl = sizeof(sc->sc_template) >> 2; 1981 ip->ip_tos = IPTOS_LOWDELAY; 1982 /* len and id are set later */ 1983#ifdef __FreeBSD__ 1984 ip->ip_off = IP_DF; 1985#else 1986 ip->ip_off = htons(IP_DF); 1987#endif 1988 ip->ip_ttl = PFSYNC_DFLTTL; 1989 ip->ip_p = IPPROTO_PFSYNC; 1990 ip->ip_src.s_addr = INADDR_ANY; 1991 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr; 1992 1993 if (sc->sc_sync_if) { 1994 /* Request a full state table update. 
*/ 1995 sc->sc_ureq_sent = time_uptime; 1996#if NCARP > 0 1997#ifdef notyet 1998#ifdef __FreeBSD__ 1999 if (sc->pfsync_sync_ok) 2000#else 2001 if (pfsync_sync_ok) 2002#endif 2003 carp_group_demote_adj(&sc->sc_if, 1); 2004#endif 2005#endif 2006#ifdef __FreeBSD__ 2007 sc->pfsync_sync_ok = 0; 2008#else 2009 pfsync_sync_ok = 0; 2010#endif 2011#ifdef __FreeBSD__ 2012 if (V_pf_status.debug >= PF_DEBUG_MISC) 2013#else 2014 if (pf_status.debug >= PF_DEBUG_MISC) 2015#endif 2016 printf("pfsync: requesting bulk update\n"); 2017#ifdef __FreeBSD__ 2018 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, 2019 pfsync_bulk_fail, V_pfsyncif); 2020#else 2021 timeout_add_sec(&sc->sc_bulkfail_tmo, 5); 2022#endif 2023 pfsync_request_update(0, 0); 2024 } 2025#ifdef __FreeBSD__ 2026 PF_UNLOCK(); 2027#endif 2028 splx(s); 2029 2030 break; 2031 2032 default: 2033 return (ENOTTY); 2034 } 2035 2036 return (0); 2037} 2038 2039int 2040pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset) 2041{ 2042 struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset); 2043 2044 pfsync_state_export(sp, st); 2045 2046 return (sizeof(*sp)); 2047} 2048 2049int 2050pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset) 2051{ 2052 struct pfsync_ins_ack *iack = 2053 (struct pfsync_ins_ack *)(m->m_data + offset); 2054 2055 iack->id = st->id; 2056 iack->creatorid = st->creatorid; 2057 2058 return (sizeof(*iack)); 2059} 2060 2061int 2062pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset) 2063{ 2064 struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset); 2065 2066 up->id = st->id; 2067 pf_state_peer_hton(&st->src, &up->src); 2068 pf_state_peer_hton(&st->dst, &up->dst); 2069 up->creatorid = st->creatorid; 2070 2071 up->expire = pf_state_expires(st); 2072 if (up->expire <= time_second) 2073 up->expire = htonl(0); 2074 else 2075 up->expire = htonl(up->expire - time_second); 2076 up->timeout = st->timeout; 2077 2078 bzero(up->_pad, sizeof(up->_pad)); /* XXX */ 2079 
2080 return (sizeof(*up)); 2081} 2082 2083int 2084pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset) 2085{ 2086 struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset); 2087 2088 dp->id = st->id; 2089 dp->creatorid = st->creatorid; 2090 2091 SET(st->state_flags, PFSTATE_NOSYNC); 2092 2093 return (sizeof(*dp)); 2094} 2095 2096void 2097pfsync_drop(struct pfsync_softc *sc) 2098{ 2099 struct pf_state *st; 2100 struct pfsync_upd_req_item *ur; 2101#ifdef notyet 2102 struct tdb *t; 2103#endif 2104 int q; 2105 2106 for (q = 0; q < PFSYNC_S_COUNT; q++) { 2107 if (TAILQ_EMPTY(&sc->sc_qs[q])) 2108 continue; 2109 2110 TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) { 2111#ifdef PFSYNC_DEBUG 2112#ifdef __FreeBSD__ 2113 KASSERT(st->sync_state == q, 2114 ("%s: st->sync_state == q", 2115 __FUNCTION__)); 2116#else 2117 KASSERT(st->sync_state == q); 2118#endif 2119#endif 2120 st->sync_state = PFSYNC_S_NONE; 2121 } 2122 TAILQ_INIT(&sc->sc_qs[q]); 2123 } 2124 2125 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) { 2126 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry); 2127 pool_put(&sc->sc_pool, ur); 2128 } 2129 2130 sc->sc_plus = NULL; 2131 2132#ifdef notyet 2133 if (!TAILQ_EMPTY(&sc->sc_tdb_q)) { 2134 TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) 2135 CLR(t->tdb_flags, TDBF_PFSYNC); 2136 2137 TAILQ_INIT(&sc->sc_tdb_q); 2138 } 2139#endif 2140 2141 sc->sc_len = PFSYNC_MINPKT; 2142} 2143 2144void 2145pfsync_sendout(void) 2146{ 2147#ifdef __FreeBSD__ 2148 struct pfsync_softc *sc = V_pfsyncif; 2149#else 2150 struct pfsync_softc *sc = pfsyncif; 2151#endif 2152#if NBPFILTER > 0 2153#ifdef __FreeBSD__ 2154 struct ifnet *ifp = sc->sc_ifp; 2155#else 2156 struct ifnet *ifp = &sc->sc_if; 2157#endif 2158#endif 2159 struct mbuf *m; 2160 struct ip *ip; 2161 struct pfsync_header *ph; 2162 struct pfsync_subheader *subh; 2163 struct pf_state *st; 2164 struct pfsync_upd_req_item *ur; 2165#ifdef notyet 2166 struct tdb *t; 2167#endif 2168#ifdef __FreeBSD__ 2169 size_t 
pktlen; 2170 int dummy_error; 2171#endif 2172 int offset; 2173 int q, count = 0; 2174 2175#ifdef __FreeBSD__ 2176 PF_LOCK_ASSERT(); 2177#else 2178 splassert(IPL_NET); 2179#endif 2180 2181 if (sc == NULL || sc->sc_len == PFSYNC_MINPKT) 2182 return; 2183 2184#if NBPFILTER > 0 2185 if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) { 2186#else 2187 if (sc->sc_sync_if == NULL) { 2188#endif 2189 pfsync_drop(sc); 2190 return; 2191 } 2192 2193 MGETHDR(m, M_DONTWAIT, MT_DATA); 2194 if (m == NULL) { 2195#ifdef __FreeBSD__ 2196 sc->sc_ifp->if_oerrors++; 2197#else 2198 sc->sc_if.if_oerrors++; 2199#endif 2200 V_pfsyncstats.pfsyncs_onomem++; 2201 pfsync_drop(sc); 2202 return; 2203 } 2204 2205#ifdef __FreeBSD__ 2206 pktlen = max_linkhdr + sc->sc_len; 2207 if (pktlen > MHLEN) { 2208 /* Find the right pool to allocate from. */ 2209 /* XXX: This is ugly. */ 2210 m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE : 2211 pktlen <= MCLBYTES ? MCLBYTES : 2212#if MJUMPAGESIZE != MCLBYTES 2213 pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE : 2214#endif 2215 pktlen <= MJUM9BYTES ? 
MJUM9BYTES : MJUM16BYTES); 2216#else 2217 if (max_linkhdr + sc->sc_len > MHLEN) { 2218 MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len); 2219#endif 2220 if (!ISSET(m->m_flags, M_EXT)) { 2221 m_free(m); 2222#ifdef __FreeBSD__ 2223 sc->sc_ifp->if_oerrors++; 2224#else 2225 sc->sc_if.if_oerrors++; 2226#endif 2227 V_pfsyncstats.pfsyncs_onomem++; 2228 pfsync_drop(sc); 2229 return; 2230 } 2231 } 2232 m->m_data += max_linkhdr; 2233 m->m_len = m->m_pkthdr.len = sc->sc_len; 2234 2235 /* build the ip header */ 2236 ip = (struct ip *)m->m_data; 2237 bcopy(&sc->sc_template, ip, sizeof(*ip)); 2238 offset = sizeof(*ip); 2239 2240#ifdef __FreeBSD__ 2241 ip->ip_len = m->m_pkthdr.len; 2242#else 2243 ip->ip_len = htons(m->m_pkthdr.len); 2244#endif 2245 ip->ip_id = htons(ip_randomid()); 2246 2247 /* build the pfsync header */ 2248 ph = (struct pfsync_header *)(m->m_data + offset); 2249 bzero(ph, sizeof(*ph)); 2250 offset += sizeof(*ph); 2251 2252 ph->version = PFSYNC_VERSION; 2253 ph->len = htons(sc->sc_len - sizeof(*ip)); 2254#ifdef __FreeBSD__ 2255 bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH); 2256#else 2257 bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH); 2258#endif 2259 2260 /* walk the queues */ 2261 for (q = 0; q < PFSYNC_S_COUNT; q++) { 2262 if (TAILQ_EMPTY(&sc->sc_qs[q])) 2263 continue; 2264 2265 subh = (struct pfsync_subheader *)(m->m_data + offset); 2266 offset += sizeof(*subh); 2267 2268 count = 0; 2269 TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) { 2270#ifdef PFSYNC_DEBUG 2271#ifdef __FreeBSD__ 2272 KASSERT(st->sync_state == q, 2273 ("%s: st->sync_state == q", 2274 __FUNCTION__)); 2275#else 2276 KASSERT(st->sync_state == q); 2277#endif 2278#endif 2279 2280 offset += pfsync_qs[q].write(st, m, offset); 2281 st->sync_state = PFSYNC_S_NONE; 2282 count++; 2283 } 2284 TAILQ_INIT(&sc->sc_qs[q]); 2285 2286 bzero(subh, sizeof(*subh)); 2287 subh->action = pfsync_qs[q].action; 2288 subh->count = htons(count); 2289 } 2290 2291 if 
(!TAILQ_EMPTY(&sc->sc_upd_req_list)) { 2292 subh = (struct pfsync_subheader *)(m->m_data + offset); 2293 offset += sizeof(*subh); 2294 2295 count = 0; 2296 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) { 2297 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry); 2298 2299 bcopy(&ur->ur_msg, m->m_data + offset, 2300 sizeof(ur->ur_msg)); 2301 offset += sizeof(ur->ur_msg); 2302 2303 pool_put(&sc->sc_pool, ur); 2304 2305 count++; 2306 } 2307 2308 bzero(subh, sizeof(*subh)); 2309 subh->action = PFSYNC_ACT_UPD_REQ; 2310 subh->count = htons(count); 2311 } 2312 2313 /* has someone built a custom region for us to add? */ 2314 if (sc->sc_plus != NULL) { 2315 bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen); 2316 offset += sc->sc_pluslen; 2317 2318 sc->sc_plus = NULL; 2319 } 2320 2321#ifdef notyet 2322 if (!TAILQ_EMPTY(&sc->sc_tdb_q)) { 2323 subh = (struct pfsync_subheader *)(m->m_data + offset); 2324 offset += sizeof(*subh); 2325 2326 count = 0; 2327 TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) { 2328 offset += pfsync_out_tdb(t, m, offset); 2329 CLR(t->tdb_flags, TDBF_PFSYNC); 2330 2331 count++; 2332 } 2333 TAILQ_INIT(&sc->sc_tdb_q); 2334 2335 bzero(subh, sizeof(*subh)); 2336 subh->action = PFSYNC_ACT_TDB; 2337 subh->count = htons(count); 2338 } 2339#endif 2340 2341 subh = (struct pfsync_subheader *)(m->m_data + offset); 2342 offset += sizeof(*subh); 2343 2344 bzero(subh, sizeof(*subh)); 2345 subh->action = PFSYNC_ACT_EOF; 2346 subh->count = htons(1); 2347 2348 /* XXX write checksum in EOF here */ 2349 2350 /* we're done, let's put it on the wire */ 2351#if NBPFILTER > 0 2352 if (ifp->if_bpf) { 2353 m->m_data += sizeof(*ip); 2354 m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip); 2355#ifdef __FreeBSD__ 2356 BPF_MTAP(ifp, m); 2357#else 2358 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 2359#endif 2360 m->m_data -= sizeof(*ip); 2361 m->m_len = m->m_pkthdr.len = sc->sc_len; 2362 } 2363 2364 if (sc->sc_sync_if == NULL) { 2365 sc->sc_len = PFSYNC_MINPKT; 2366 
		m_freem(m);
		return;
	}
#endif

	/*
	 * (tail of pfsync_sendout(), whose head is outside this view)
	 * Account the packet and hand it off: on FreeBSD it is queued on
	 * the pseudo-interface send queue and the pfsync netisr is kicked;
	 * on OpenBSD it is transmitted directly via ip_output().  In both
	 * cases sc_len is reset to PFSYNC_MINPKT to start a fresh packet.
	 */
#ifdef __FreeBSD__
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
	schednetisr(NETISR_PFSYNC);
#else
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;

	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
	else
		pfsyncstats.pfsyncs_oerrors++;

	/* start again */
	sc->sc_len = PFSYNC_MINPKT;
#endif
}

/*
 * Queue a newly created state for an "insert state" announcement to our
 * peers.  States created by NOSYNC rules, or by pfsync's own traffic, are
 * permanently marked PFSTATE_NOSYNC and never announced.  Called with the
 * pf lock held (FreeBSD) / at splsoftnet (OpenBSD).
 */
void
pfsync_insert_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
		return;
	}

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
		return;

#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif
#endif

	/* First datum in an empty packet: arm the 1-second flush timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	pfsync_q_ins(st, PFSYNC_S_INS);

	/*
	 * If the state is deferred (PFSTATE_ACK), push the insert out
	 * immediately so the peer can acknowledge it and release the
	 * deferred packet.
	 */
	if (ISSET(st->state_flags, PFSTATE_ACK))
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	else
		st->sync_updates = 0;
}

/* Deferral timeout in callout ticks before a deferred packet is released. */
int defer = 10;

/*
 * Defer transmission of the state-creating packet until the peer has
 * acknowledged the new state (or the timeout fires).  Returns 1 if the
 * mbuf was taken over by the deferral machinery, 0 if the caller should
 * transmit it normally.  Called with the pf lock held.
 */
int
pfsync_defer(struct pf_state *st, struct mbuf *m)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	/* Cap outstanding deferrals; release the oldest to make room. */
	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = pool_get(&sc->sc_pool, M_NOWAIT);
	if (pd == NULL)
		return (0);
	sc->sc_deferred++;

	/* Mark the packet so pf does not re-inspect it when released. */
#ifdef __FreeBSD__
	m->m_flags |= M_SKIP_FIREWALL;
#else
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
#endif
	SET(st->state_flags, PFSTATE_ACK);

	pd->pd_st = st;
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
#ifdef __FreeBSD__
	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
	    pd);
#else
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);
#endif

	return (1);
}

/*
 * Release a deferral: either drop the held packet (drop != 0) or send it
 * out via ip_output().  The pf lock is dropped around ip_output() on
 * FreeBSD to respect lock ordering.  Called with the pf lock held.
 */
void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;

	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo); /* bah */
	if (drop)
		m_freem(pd->pd_m);
	else {
		s = splnet();
#ifdef __FreeBSD__
		/* XXX: use pf_defered?! */
		PF_UNLOCK();
#endif
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	pool_put(&sc->sc_pool, pd);
}

/*
 * Callout handler: the peer never acknowledged the deferred state in
 * time, so release the held packet.
 */
void
pfsync_defer_tmo(void *arg)
{
	/*
	 * NOTE(review): pd is only declared under VIMAGE because
	 * CURVNET_SET() below expands to nothing without VIMAGE, leaving
	 * pd otherwise unused — confirm if touching this.
	 */
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_deferral *pd = arg;
#endif
	int s;

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
	PF_LOCK();
#endif
	pfsync_undefer(arg, 0);
#ifdef __FreeBSD__
	PF_UNLOCK();
	CURVNET_RESTORE();
#endif
	splx(s);
}

/*
 * Find the deferral attached to state 'st' and release it (sending or
 * dropping the held packet per 'drop').  Panics if no deferral exists —
 * callers only invoke this when PFSTATE_ACK is set.
 */
void
pfsync_deferred(struct pf_state *st, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			pfsync_undefer(pd, drop);
			return;
		}
	}

	/* NOTE(review): message names the wrong function (pfsync_deferred). */
	panic("pfsync_send_deferred: unable to find deferred state");
}

u_int pfsync_upds = 0;	/* stat: update announcements triggered */

/*
 * Queue an "update" announcement for a state whose contents changed.
 * Compressed updates (UPD_C) are used where possible; for TCP, an
 * immediate send is forced after sc_maxupdates accumulated updates.
 * Called with the pf lock held / at splsoftnet.
 */
void
pfsync_update_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int sync = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = 1;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		break;

	default:
		panic("pfsync_update_state: unexpected sync state %d",
		    st->sync_state);
	}

	/* Flush if forced, or if the state changed twice within 2 seconds. */
	if (sync || (time_uptime - st->pfsync_time) < 2) {
		pfsync_upds++;
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	}
}

/*
 * Ask peers to send us a full update for the state identified by
 * (creatorid, id).  id == 0 && creatorid == 0 requests a bulk update.
 * Called with the pf lock held.
 */
void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);
	int s;

	PF_LOCK_ASSERT();

	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
	 */

	item = pool_get(&sc->sc_pool, PR_NOWAIT);
	if (item == NULL) {
		/* XXX stats */
		return;
	}

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	/* First request in the packet also needs a subheader. */
	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_sync_if->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);

		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	sc->sc_len += nlen;

#ifdef __FreeBSD__
	pfsync_sendout();
#else
	schednetisr(NETISR_PFSYNC);
#endif
}

/*
 * A peer requested an update for this state: queue a full (uncompressed)
 * update and push it out.  Called with the pf lock held.
 */
void
pfsync_update_state_req(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	PF_LOCK_ASSERT();

	if (sc == NULL)
		panic("pfsync_update_state_req: nonexistant instance");

	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH: re-queue as a full update */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD);
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
		return;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL:
		/* we're already handling it */
		return;

	default:
		panic("pfsync_update_state_req: unexpected sync state %d",
		    st->sync_state);
	}
}

/*
 * Queue a "delete state" announcement.  A state that was never announced
 * (still on the insert queue) is simply dequeued and forgotten.  Called
 * with the pf lock held / at splsoftnet.
 */
void
pfsync_delete_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 1);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* we never got to tell the world so just forget about it */
		pfsync_q_del(st);
		return;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH to putting it on the del list */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL);
		return;

	default:
		panic("pfsync_delete_state: unexpected sync state %d",
		    st->sync_state);
	}
}

/*
 * Announce a "clear states" event (e.g. pfctl -F states) for the given
 * creator id and interface name to our peers.  Called with the pf lock
 * held / at splsoftnet.
 */
void
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	pfsync_send_plus(&r, sizeof(r));
}

/*
 * Insert state 'st' on action queue 'q' and grow the pending-packet
 * length accordingly, flushing first if the addition would exceed the
 * interface MTU.  Called with the pf lock held.
 */
void
pfsync_q_ins(struct pf_state *st, int q)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = pfsync_qs[q].len;
	int s;

	PF_LOCK_ASSERT();

#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif

	/* Sanity check, currently always compiled in ("#if 1 || ..."). */
#if 1 || defined(PFSYNC_DEBUG)
	if (sc->sc_len < PFSYNC_MINPKT)
#ifdef __FreeBSD__
		panic("pfsync pkt len is too low %zu", sc->sc_len);
#else
		panic("pfsync pkt len is too low %d", sc->sc_len);
#endif
#endif
	/* First entry on this queue also needs a subheader. */
	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);

		/* Packet is now empty, so the subheader is needed again. */
		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	sc->sc_len += nlen;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
	st->sync_state = q;
}

/*
 * Remove state 'st' from whatever action queue it sits on, shrinking the
 * pending-packet length (including the subheader if the queue empties).
 */
void
pfsync_q_del(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int q = st->sync_state;

#ifdef __FreeBSD__
	KASSERT(st->sync_state != PFSYNC_S_NONE,
	    ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state != PFSYNC_S_NONE);
#endif

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}

/*
 * IPsec SA (tdb) synchronisation — not wired up on FreeBSD, hence the
 * whole section is compiled out with "notyet".
 */
#ifdef notyet
/*
 * Queue a tdb replay-counter update, flushing first if it would not fit
 * in the current packet.  Subsequent updates to an already-queued tdb
 * just bump tdb_updates and may force a send at sc_maxupdates.
 */
void
pfsync_update_tdb(struct tdb *t, int output)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = sizeof(struct pfsync_tdb);
	int s;

	if (sc == NULL)
		return;

	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
		if (TAILQ_EMPTY(&sc->sc_tdb_q))
			nlen += sizeof(struct pfsync_subheader);

		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
			s = splnet();
			PF_LOCK();
			pfsync_sendout();
			PF_UNLOCK();
			splx(s);

			nlen = sizeof(struct pfsync_subheader) +
			    sizeof(struct pfsync_tdb);
		}

		sc->sc_len += nlen;
		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
		SET(t->tdb_flags, TDBF_PFSYNC);
		t->tdb_updates = 0;
	} else {
		if (++t->tdb_updates >= sc->sc_maxupdates)
			schednetisr(NETISR_PFSYNC);
	}

	if (output)
		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
	else
		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
}

/* Drop a queued tdb update (the SA is going away). */
void
pfsync_delete_tdb(struct tdb *t)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
		return;

	sc->sc_len -= sizeof(struct pfsync_tdb);
	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
	CLR(t->tdb_flags, TDBF_PFSYNC);

	if (TAILQ_EMPTY(&sc->sc_tdb_q))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}

/*
 * Serialize one tdb into the outgoing packet at 'offset'.  Returns the
 * number of bytes written (sizeof(struct pfsync_tdb)).
 */
int
pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
{
	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);

	bzero(ut, sizeof(*ut));
	ut->spi = t->tdb_spi;
	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
	/*
	 * When a failover happens, the master's rpl is probably above
	 * what we see here (we may be up to a second late), so
	 * increase it a bit for outbound tdbs to manage most such
	 * situations.
	 *
	 * For now, just add an offset that is likely to be larger
	 * than the number of packets we can see in one second. The RFC
	 * just says the next packet must have a higher seq value.
	 *
	 * XXX What is a good algorithm for this? We could use
	 * a rate-determined increase, but to know it, we would have
	 * to extend struct tdb.
	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
	 * will soon be replaced anyway. For now, just don't handle
	 * this edge case.
	 */
#define RPL_INCR 16384
	ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
	    RPL_INCR : 0));
	ut->cur_bytes = htobe64(t->tdb_cur_bytes);
	ut->sproto = t->tdb_sproto;

	return (sizeof(*ut));
}
#endif

/*
 * A peer asked for a bulk update: walk the whole state list and stream
 * it out, bracketed by BUS_START / BUS_END status messages.  If the list
 * is empty, just report BUS_END.
 */
void
pfsync_bulk_start(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync: received bulk update request\n");

#ifdef __FreeBSD__
	PF_LOCK();
	if (TAILQ_EMPTY(&V_state_list))
#else
	if (TAILQ_EMPTY(&state_list))
#endif
		pfsync_bulk_status(PFSYNC_BUS_END);
	else {
		sc->sc_ureq_received = time_uptime;
		if (sc->sc_bulk_next == NULL)
#ifdef __FreeBSD__
			sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
#else
			sc->sc_bulk_next = TAILQ_FIRST(&state_list);
#endif
		/* Remember where we started so we know when we've wrapped. */
		sc->sc_bulk_last = sc->sc_bulk_next;

		pfsync_bulk_status(PFSYNC_BUS_START);
		callout_reset(&sc->sc_bulk_tmo, 1,
		    pfsync_bulk_update, sc);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
}

/*
 * Callout: send the next batch of states for the in-progress bulk
 * update.  Walks the circular state list from sc_bulk_next until either
 * the packet fills (reschedule ourselves) or we're back at sc_bulk_last
 * (done, send BUS_END).  Called with the pf lock held.
 */
void
pfsync_bulk_update(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pf_state *st = sc->sc_bulk_next;
	int i = 0;
	int s;

	PF_LOCK_ASSERT();

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif
	for (;;) {
		/* Only send states not touched since the bulk request. */
		if (st->sync_state == PFSYNC_S_NONE &&
		    st->timeout < PFTM_MAX &&
		    st->pfsync_time <= sc->sc_ureq_received) {
			pfsync_update_state_req(st);
			i++;
		}

		st = TAILQ_NEXT(st, entry_list);
		if (st == NULL)
#ifdef __FreeBSD__
			st = TAILQ_FIRST(&V_state_list);
#else
			st = TAILQ_FIRST(&state_list);
#endif

		if (st == sc->sc_bulk_last) {
			/* we're done */
			sc->sc_bulk_next = NULL;
			sc->sc_bulk_last = NULL;
			pfsync_bulk_status(PFSYNC_BUS_END);
			break;
		}

#ifdef __FreeBSD__
		if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
#else
		if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
#endif
		    sizeof(struct pfsync_state)) {
			/* we've filled a packet */
			sc->sc_bulk_next = st;
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulk_tmo, 1,
			    pfsync_bulk_update, sc);
#else
			timeout_add(&sc->sc_bulk_tmo, 1);
#endif
			break;
		}
	}

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
	splx(s);
}

/*
 * Send a bulk-update status message (PFSYNC_BUS_START / PFSYNC_BUS_END)
 * carrying our host id and the elapsed time since the request.  Called
 * with the pf lock held.
 */
void
pfsync_bulk_status(u_int8_t status)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	PF_LOCK_ASSERT();

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);

#ifdef __FreeBSD__
	r.bus.creatorid = V_pf_status.hostid;
#else
	r.bus.creatorid = pf_status.hostid;
#endif
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	pfsync_send_plus(&r, sizeof(r));
}

/*
 * Callout: our bulk-update request went unanswered.  Retry up to
 * PFSYNC_MAX_BULKTRIES times (every 5 seconds); after that, give up and
 * pretend the transfer succeeded so carp demotion is lifted.
 */
void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
		PF_LOCK();
		pfsync_request_update(0, 0);
		PF_UNLOCK();
	} else {
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
#if NCARP > 0
#ifdef notyet
#ifdef __FreeBSD__
		if (!sc->pfsync_sync_ok)
#else
		if (!pfsync_sync_ok)
#endif
			carp_group_demote_adj(&sc->sc_if, -1);
#endif
#endif
#ifdef __FreeBSD__
		sc->pfsync_sync_ok = 1;
#else
		pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: failed to receive bulk update\n");
	}

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}

/*
 * Attach an out-of-band ("plus") payload to the pending packet and flush
 * it immediately.  Flushes first if the payload would not fit.  Called
 * with the pf lock held.
 */
void
pfsync_send_plus(void *plus, size_t pluslen)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

	PF_LOCK_ASSERT();

#ifdef __FreeBSD__
	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);
	}

	sc->sc_plus = plus;
	sc->sc_len += (sc->sc_pluslen = pluslen);

	s = splnet();
	pfsync_sendout();
	splx(s);
}

/* Return non-zero iff a pfsync interface exists and is up and running. */
int
pfsync_up(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
#else
	if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
#endif
		return (0);

	return (1);
}

/*
 * Return non-zero if pfsync still references 'st' (queued for an
 * announcement, or a cursor of an in-progress bulk update), i.e. the
 * state must not be freed yet.
 */
int
pfsync_state_in_use(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL)
		return (0);

	if (st->sync_state != PFSYNC_S_NONE ||
	    st == sc->sc_bulk_next ||
	    st == sc->sc_bulk_last)
		return (1);

	return (0);
}

u_int pfsync_ints;	/* stat: netisr/swi invocations */
u_int pfsync_tmos;	/* stat: flush-timer expirations */

/*
 * One-second flush callout: push out whatever has accumulated in the
 * pending packet.
 */
void
pfsync_timeout(void *arg)
{
	/*
	 * NOTE(review): sc is only declared under VIMAGE because
	 * CURVNET_SET() below is a no-op without it, leaving sc otherwise
	 * unused — confirm if touching this.
	 */
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_softc *sc = arg;
#endif
	int s;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	pfsync_tmos++;

	s = splnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}

/* this is a softnet/netisr handler */
void
#ifdef __FreeBSD__
pfsyncintr(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct mbuf *m, *n;

	CURVNET_SET(sc->sc_ifp->if_vnet);
	pfsync_ints++;

	/* Drain the whole send queue and transmit each packet. */
	IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);

	for (; m != NULL; m = n) {

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
		    == 0)
			V_pfsyncstats.pfsyncs_opackets++;
		else
			V_pfsyncstats.pfsyncs_oerrors++;
	}
	CURVNET_RESTORE();
}
#else
pfsyncintr(void)
{
	int s;

	pfsync_ints++;

	s = splnet();
	pfsync_sendout();
	splx(s);
}
#endif

/*
 * sysctl handler (OpenBSD interface); the body is compiled out on
 * FreeBSD, so this always returns ENOPROTOOPT there.
 */
int
pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

#ifdef notyet
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case PFSYNCCTL_STATS:
		if (newp != NULL)
			return (EPERM);
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
	}
#endif
	return (ENOPROTOOPT);
}

#ifdef __FreeBSD__
/*
 * ifnet-departure event handler: if the interface we were syncing over
 * goes away, detach from it and drop our multicast membership record.
 */
void
pfsync_ifdetach(void *arg, struct ifnet *ifp)
{
	struct pfsync_softc *sc = (struct pfsync_softc *)arg;
	struct ip_moptions *imo;

	if (sc == NULL || sc->sc_sync_if != ifp)
		return;         /* not for us; unlocked read */

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PF_LOCK();

	/* Deal with a member interface going away from under us. */
	sc->sc_sync_if = NULL;
	imo = &sc->sc_imo;
	if (imo->imo_num_memberships > 0) {
		KASSERT(imo->imo_num_memberships == 1,
		    ("%s: imo_num_memberships != 1", __func__));
		/*
		 * Our event handler is always called after protocol
		 * domains have been detached from the underlying ifnet.
		 * Do not call in_delmulti(); we held a single reference
		 * which the protocol domain has purged in in_purgemaddrs().
		 */
		PF_UNLOCK();
		imo->imo_membership[--imo->imo_num_memberships] = NULL;
		PF_LOCK();
		imo->imo_multicast_ifp = NULL;
	}

	PF_UNLOCK();

	CURVNET_RESTORE();
}

/*
 * Per-vnet initialisation: attach the clone, register the software
 * interrupt handler, and publish the pf -> pfsync hook pointers.
 */
static int
vnet_pfsync_init(const void *unused)
{
	int error = 0;

	pfsyncattach(0);

	error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
	    SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);

	PF_LOCK();
	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_up_ptr = pfsync_up;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_state_in_use_ptr = pfsync_state_in_use;
	pfsync_defer_ptr = pfsync_defer;
	PF_UNLOCK();

	return (0);
}

/*
 * Per-vnet teardown: unhook from pf, remove the swi handler, and detach
 * the interface cloner.
 */
static int
vnet_pfsync_uninit(const void *unused)
{

	swi_remove(pfsync_swi.pfsync_swi_cookie);

	PF_LOCK();
	pfsync_state_import_ptr = NULL;
	pfsync_up_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_state_in_use_ptr = NULL;
	pfsync_defer_ptr = NULL;
	PF_UNLOCK();

	if_clone_detach(&pfsync_cloner);

	return (0);
}

/* Define startup order. */
#define PFSYNC_SYSINIT_ORDER	SI_SUB_PROTO_IF
#define PFSYNC_MODEVENT_ORDER	(SI_ORDER_FIRST) /* On boot slot in here. */
#define PFSYNC_VNET_ORDER	(PFSYNC_MODEVENT_ORDER + 2) /* Later still. */

/*
 * Starting up.
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 */
VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
    vnet_pfsync_init, NULL);

/*
 * Closing up shop. These are done in REVERSE ORDER,
 * Not called on reboot.
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
    vnet_pfsync_uninit, NULL);
/*
 * Module event handler; on FreeBSD the real work happens in the
 * VNET_SYSINIT/SYSUNINIT handlers above, so both cases are no-ops here.
 */
static int
pfsync_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
#ifndef __FreeBSD__
		pfsyncattach(0);
#endif
		break;
	case MOD_UNLOAD:
#ifndef __FreeBSD__
		if_clone_detach(&pfsync_cloner);
#endif
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

static moduledata_t pfsync_mod = {
	"pfsync",
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
#endif /* __FreeBSD__ */