if_pfsync.c revision 228855
1/* $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ */ 2 3/* 4 * Copyright (c) 2002 Michael Shalayeff 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 26 * THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* 30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org> 31 * 32 * Permission to use, copy, modify, and distribute this software for any 33 * purpose with or without fee is hereby granted, provided that the above 34 * copyright notice and this permission notice appear in all copies. 35 * 36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 38 * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 43 */ 44 45/* 46 * Revisions picked from OpenBSD after revision 1.110 import: 47 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates 48 * 1.120, 1.175 - use monotonic time_uptime 49 * 1.122 - reduce number of updates for non-TCP sessions 50 */ 51 52#ifdef __FreeBSD__ 53#include "opt_inet.h" 54#include "opt_inet6.h" 55#include "opt_pf.h" 56 57#include <sys/cdefs.h> 58__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 228855 2011-12-24 00:23:27Z pluknet $"); 59 60#define NBPFILTER 1 61 62#ifdef DEV_PFSYNC 63#define NPFSYNC DEV_PFSYNC 64#else 65#define NPFSYNC 0 66#endif 67#endif /* __FreeBSD__ */ 68 69#include <sys/param.h> 70#include <sys/kernel.h> 71#ifdef __FreeBSD__ 72#include <sys/bus.h> 73#include <sys/interrupt.h> 74#include <sys/priv.h> 75#endif 76#include <sys/proc.h> 77#include <sys/systm.h> 78#include <sys/time.h> 79#include <sys/mbuf.h> 80#include <sys/socket.h> 81#ifdef __FreeBSD__ 82#include <sys/endian.h> 83#include <sys/malloc.h> 84#include <sys/module.h> 85#include <sys/sockio.h> 86#include <sys/taskqueue.h> 87#include <sys/lock.h> 88#include <sys/mutex.h> 89#else 90#include <sys/ioctl.h> 91#include <sys/timeout.h> 92#endif 93#include <sys/sysctl.h> 94#ifndef __FreeBSD__ 95#include <sys/pool.h> 96#endif 97 98#include <net/if.h> 99#ifdef __FreeBSD__ 100#include <net/if_clone.h> 101#endif 102#include <net/if_types.h> 103#include <net/route.h> 104#include <net/bpf.h> 105#include <net/netisr.h> 106#ifdef __FreeBSD__ 107#include <net/vnet.h> 108#endif 109 110#include <netinet/in.h> 111#include <netinet/if_ether.h> 112#include <netinet/tcp.h> 113#include <netinet/tcp_seq.h> 114 115#ifdef INET 116#include 
<netinet/in_systm.h> 117#include <netinet/in_var.h> 118#include <netinet/ip.h> 119#include <netinet/ip_var.h> 120#endif 121 122#ifdef INET6 123#include <netinet6/nd6.h> 124#endif /* INET6 */ 125 126#ifdef __FreeBSD__ 127#include <netinet/ip_carp.h> 128#else 129#include "carp.h" 130#if NCARP > 0 131#include <netinet/ip_carp.h> 132#endif 133#endif 134 135#include <net/pfvar.h> 136#include <net/if_pfsync.h> 137 138#ifndef __FreeBSD__ 139#include "bpfilter.h" 140#include "pfsync.h" 141#endif 142 143#define PFSYNC_MINPKT ( \ 144 sizeof(struct ip) + \ 145 sizeof(struct pfsync_header) + \ 146 sizeof(struct pfsync_subheader) + \ 147 sizeof(struct pfsync_eof)) 148 149struct pfsync_pkt { 150 struct ip *ip; 151 struct in_addr src; 152 u_int8_t flags; 153}; 154 155int pfsync_input_hmac(struct mbuf *, int); 156 157int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *, 158 struct pfsync_state_peer *); 159 160int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int); 161int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int); 162int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int); 163int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int); 164int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int); 165int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int); 166int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int); 167int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int); 168int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int); 169int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int); 170int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int); 171 172int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int); 173 174int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = { 175 pfsync_in_clr, /* PFSYNC_ACT_CLR */ 176 pfsync_in_ins, /* PFSYNC_ACT_INS */ 177 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ 178 pfsync_in_upd, /* PFSYNC_ACT_UPD */ 179 
	pfsync_in_upd_c,	/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,		/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,		/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,	/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,	/* PFSYNC_ACT_INS_F */
	pfsync_in_error,	/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,		/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,		/* PFSYNC_ACT_TDB */
	pfsync_in_eof		/* PFSYNC_ACT_EOF */
};

/*
 * Descriptor for one outgoing message queue: the serializer that writes a
 * state into an mbuf, the on-wire size of one entry, and the pfsync action
 * code placed in the subheader.
 */
struct pfsync_q {
	int		(*write)(struct pf_state *, struct mbuf *, int);
	size_t		len;
	u_int8_t	action;
};

/* we have one of these for every PFSYNC_S_ */
int	pfsync_out_state(struct pf_state *, struct mbuf *, int);
int	pfsync_out_iack(struct pf_state *, struct mbuf *, int);
int	pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
int	pfsync_out_del(struct pf_state *, struct mbuf *, int);

/* Indexed by PFSYNC_S_* queue number; order must match those constants. */
struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};

void	pfsync_q_ins(struct pf_state *, int);
void	pfsync_q_del(struct pf_state *);

/* A queued request asking the peer to (re)send a full state update. */
struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
};
TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);

/*
 * A deferred packet: the initial packet of a new state is held (pd_m)
 * until the peer acknowledges the state insert, or the timeout fires.
 */
struct pfsync_deferral {
	TAILQ_ENTRY(pfsync_deferral)		 pd_entry;
	struct pf_state				*pd_st;
	struct mbuf				*pd_m;
#ifdef __FreeBSD__
	struct callout				 pd_tmo;
#else
	struct timeout				 pd_tmo;
#endif
};
TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);

/* Single pool/zone item size large enough for either allocation type. */
#define PFSYNC_PLSIZE	MAX(sizeof(struct pfsync_upd_req_item), \
	    sizeof(struct pfsync_deferral))

#ifdef notyet
int	pfsync_out_tdb(struct tdb *, struct mbuf *, int);
#endif

/* Per-interface (and on FreeBSD, per-vnet) pfsync instance state. */
struct pfsync_softc {
#ifdef __FreeBSD__
	struct ifnet		*sc_ifp;	/* allocated via if_alloc() */
#else
	struct ifnet		 sc_if;		/* embedded on OpenBSD */
#endif
	struct ifnet		*sc_sync_if;	/* interface we sync over */

#ifdef __FreeBSD__
	uma_zone_t		 sc_pool;	/* upd_req/deferral items */
#else
	struct pool		 sc_pool;
#endif

	struct ip_moptions	 sc_imo;	/* multicast membership */

	struct in_addr		 sc_sync_peer;	/* unicast peer, if any */
	u_int8_t		 sc_maxupdates;	/* max updates before full send */
#ifdef __FreeBSD__
	int			 pfsync_sync_ok; /* bulk update completed */
#endif

	struct ip		 sc_template;	/* prebuilt IP header */

	struct pf_state_queue	 sc_qs[PFSYNC_S_COUNT]; /* per-action queues */
	size_t			 sc_len;	/* bytes queued so far */

	struct pfsync_upd_reqs	 sc_upd_req_list;

	struct pfsync_deferrals	 sc_deferrals;
	u_int			 sc_deferred;	/* count of sc_deferrals */

	void			*sc_plus;	/* extra payload to append */
	size_t			 sc_pluslen;

	u_int32_t		 sc_ureq_sent;	/* when we asked for bulk */
	int			 sc_bulk_tries;
#ifdef __FreeBSD__
	struct callout		 sc_bulkfail_tmo;
#else
	struct timeout		 sc_bulkfail_tmo;
#endif

	u_int32_t		 sc_ureq_received; /* when peer asked us */
	struct pf_state		*sc_bulk_next;	/* bulk-send cursor */
	struct pf_state		*sc_bulk_last;
#ifdef __FreeBSD__
	struct callout		 sc_bulk_tmo;
#else
	struct timeout		 sc_bulk_tmo;
#endif

	TAILQ_HEAD(, tdb)	 sc_tdb_q;	/* queued IPsec TDB updates */

#ifdef __FreeBSD__
	struct callout		 sc_tmo;	/* periodic flush */
#else
	struct timeout		 sc_tmo;
#endif
#ifdef __FreeBSD__
	eventhandler_tag	 sc_detachtag;	/* ifnet departure hook */
#endif

};

#ifdef __FreeBSD__
static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)

static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)
static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
#else
struct pfsync_softc	*pfsyncif = NULL;
struct pfsyncstats	 pfsyncstats;
#define	V_pfsyncstats	 pfsyncstats
#endif

#ifdef __FreeBSD__
/* On FreeBSD the netisr is emulated with a software interrupt thread. */
static void pfsyncintr(void *);
struct pfsync_swi {
	void *	pfsync_swi_cookie;
};
static struct pfsync_swi pfsync_swi;
#define	schednetisr(p)	swi_sched(pfsync_swi.pfsync_swi_cookie, 0)
#define	NETISR_PFSYNC
#endif

void	pfsyncattach(int);
#ifdef __FreeBSD__
int	pfsync_clone_create(struct if_clone *, int, caddr_t);
void	pfsync_clone_destroy(struct ifnet *);
#else
int	pfsync_clone_create(struct if_clone *, int);
int	pfsync_clone_destroy(struct ifnet *);
#endif
int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
	    struct pf_state_peer *);
void	pfsync_update_net_tdb(struct pfsync_tdb *);
int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
#ifdef __FreeBSD__
	    struct route *);
#else
	    struct rtentry *);
#endif
int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
void	pfsyncstart(struct ifnet *);

struct mbuf *pfsync_if_dequeue(struct ifnet *);
struct mbuf *pfsync_get_mbuf(struct pfsync_softc *);

void	pfsync_deferred(struct pf_state *, int);
void	pfsync_undefer(struct pfsync_deferral *, int);
void	pfsync_defer_tmo(void *);

void	pfsync_request_update(u_int32_t, u_int64_t);
void	pfsync_update_state_req(struct pf_state *);

void	pfsync_drop(struct pfsync_softc *);
void	pfsync_sendout(void);
void	pfsync_send_plus(void *, size_t);
int	pfsync_tdb_sendout(struct pfsync_softc *);
int	pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
void	pfsync_timeout(void *);
void	pfsync_tdb_timeout(void *);
void	pfsync_send_bus(struct pfsync_softc *, u_int8_t);

void	pfsync_bulk_start(void);
void	pfsync_bulk_status(u_int8_t);
void	pfsync_bulk_update(void *);
void	pfsync_bulk_fail(void *);

#ifdef __FreeBSD__
void	pfsync_ifdetach(void *, struct ifnet *);

/* XXX: ugly */
#define	betoh64
(unsigned long long)be64toh
#define	timeout_del	callout_stop
#endif

#define PFSYNC_MAX_BULKTRIES	12
#ifndef __FreeBSD__
int	pfsync_sync_ok;
#endif

#ifdef __FreeBSD__
IFC_SIMPLE_DECLARE(pfsync, 1);
#else
struct if_clone	pfsync_cloner =
    IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
#endif

/*
 * Attach the pfsync pseudo-device cloner.  The npfsync argument is the
 * config(8)-supplied count and is unused here.
 */
void
pfsyncattach(int npfsync)
{
	if_clone_attach(&pfsync_cloner);
}

/*
 * Create the pfsync0 interface.  Only unit 0 is supported.  Allocates the
 * softc, its item pool, multicast state and the ifnet, initializes all
 * send queues and timers, and publishes the instance in
 * V_pfsyncif/pfsyncif.  Returns 0 on success or an errno.
 */
int
#ifdef __FreeBSD__
pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
#else
pfsync_clone_create(struct if_clone *ifc, int unit)
#endif
{
	struct pfsync_softc *sc;
	struct ifnet *ifp;
	int q;

	/* only a single pfsync interface may exist */
	if (unit != 0)
		return (EINVAL);

#ifndef __FreeBSD__
	pfsync_sync_ok = 1;
#endif

	sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc == NULL)
		return (ENOMEM);

	for (q = 0; q < PFSYNC_S_COUNT; q++)
		TAILQ_INIT(&sc->sc_qs[q]);

#ifdef __FreeBSD__
	sc->pfsync_sync_ok = 1;
	/* zone for update-request items and deferral records */
	sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	if (sc->sc_pool == NULL) {
		free(sc, M_DEVBUF);
		return (ENOMEM);
	}
#else
	pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
#endif
	TAILQ_INIT(&sc->sc_upd_req_list);
	TAILQ_INIT(&sc->sc_deferrals);
	sc->sc_deferred = 0;

	TAILQ_INIT(&sc->sc_tdb_q);

	/* an empty packet still carries the fixed headers */
	sc->sc_len = PFSYNC_MINPKT;
	sc->sc_maxupdates = 128;

#ifdef __FreeBSD__
	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
	sc->sc_imo.imo_multicast_vif = -1;
#else
	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
	    M_WAITOK | M_ZERO);
	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
#endif

#ifdef __FreeBSD__
	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
	if (ifp == NULL) {
		/* unwind in reverse order of allocation */
		free(sc->sc_imo.imo_membership, M_DEVBUF);
		uma_zdestroy(sc->sc_pool);
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}
	if_initname(ifp, ifc->ifc_name, unit);

	/* tear down cleanly if the sync interface disappears */
	sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event,
#ifdef __FreeBSD__
	    pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY);
#else
	    pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY);
#endif
	if (sc->sc_detachtag == NULL) {
		if_free(ifp);
		free(sc->sc_imo.imo_membership, M_DEVBUF);
		uma_zdestroy(sc->sc_pool);
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}
#else
	ifp = &sc->sc_if;
	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
#endif
	ifp->if_softc = sc;
	ifp->if_ioctl = pfsyncioctl;
	ifp->if_output = pfsyncoutput;
	ifp->if_start = pfsyncstart;
	ifp->if_type = IFT_PFSYNC;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_hdrlen = sizeof(struct pfsync_header);
	ifp->if_mtu = 1500; /* XXX */
#ifdef __FreeBSD__
	callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
	/* bulk updates walk pf state, so run under pf_task_mtx */
	callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
	callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
#else
	ifp->if_hardmtu = MCLBYTES; /* XXX */
	timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
	timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
	timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
#endif

	if_attach(ifp);
#ifndef __FreeBSD__
	if_alloc_sadl(ifp);

#if NCARP > 0
	if_addgroup(ifp, "carp");
#endif
#endif

#if NBPFILTER > 0
#ifdef __FreeBSD__
	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
#else
	bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
#endif
#endif

#ifdef __FreeBSD__
	V_pfsyncif = sc;
#else
	pfsyncif = sc;
#endif

	return (0);
}

#ifdef __FreeBSD__
void
#else
int
#endif
/*
 * Destroy the pfsync interface: stop all timers, restore any CARP
 * demotion applied while out of sync, detach from bpf and the ifnet
 * layer, drop queued messages and deferrals, and release the pools
 * and the softc.  Inverse of pfsync_clone_create().
 */
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;

#ifdef __FreeBSD__
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag);
	PF_LOCK();
#endif
	timeout_del(&sc->sc_bulkfail_tmo);
	timeout_del(&sc->sc_bulk_tmo);
	timeout_del(&sc->sc_tmo);
#ifdef __FreeBSD__
	PF_UNLOCK();
	/* if a bulk update never completed, undo the CARP demotion */
	if (!sc->pfsync_sync_ok && carp_demote_adj_p)
		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
#else
#if NCARP > 0
	if (!pfsync_sync_ok)
		carp_group_demote_adj(&sc->sc_if, -1);
#endif
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	/* free everything still queued for transmission */
	pfsync_drop(sc);

	/* release (without sending) all deferred initial packets */
	while (sc->sc_deferred > 0)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

#ifdef __FreeBSD__
	UMA_DESTROY(sc->sc_pool);
#else
	pool_destroy(&sc->sc_pool);
#endif
#ifdef __FreeBSD__
	if_free(ifp);
	free(sc->sc_imo.imo_membership, M_DEVBUF);
#else
	free(sc->sc_imo.imo_membership, M_IPMOPTS);
#endif
	free(sc, M_DEVBUF);

#ifdef __FreeBSD__
	V_pfsyncif = NULL;
#else
	pfsyncif = NULL;
#endif

#ifndef __FreeBSD__
	return (0);
#endif
}

/*
 * Pop one mbuf off the interface send queue, with the locking
 * appropriate to each platform.  Returns NULL when the queue is empty.
 */
struct mbuf *
pfsync_if_dequeue(struct ifnet *ifp)
{
	struct mbuf *m;
#ifndef __FreeBSD__
	int s;
#endif

#ifdef __FreeBSD__
	IF_LOCK(&ifp->if_snd);
	_IF_DROP(&ifp->if_snd);
	_IF_DEQUEUE(&ifp->if_snd, m);
	IF_UNLOCK(&ifp->if_snd);
#else
	s = splnet();
	IF_DEQUEUE(&ifp->if_snd, m);
	splx(s);
#endif

	return (m);
}

/*
 * Start output on the pfsync interface.
 * pfsync never transmits from its own queue; anything that lands there
 * is counted as dropped and freed.
 */
void
pfsyncstart(struct ifnet *ifp)
{
	struct mbuf *m;

	while ((m = pfsync_if_dequeue(ifp)) != NULL) {
#ifndef __FreeBSD__
		IF_DROP(&ifp->if_snd);
#endif
		m_freem(m);
	}
}

/*
 * If the wire-format peer carries scrub state and the local peer has no
 * scrub record yet, allocate one.  Returns ENOMEM on allocation failure.
 */
int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
	if (s->scrub.scrub_flag && d->scrub == NULL) {
#ifdef __FreeBSD__
		d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
#else
		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
#endif
		if (d->scrub == NULL)
			return (ENOMEM);
	}

	return (0);
}

#ifndef __FreeBSD__
/*
 * Serialize a local pf_state into the network (big-endian) wire format
 * used on the pfsync protocol.  Times are converted to deltas so the
 * peer can reconstruct them against its own clock.
 */
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_uptime - st->creation);
	/* export expiry as "seconds remaining", clamped at zero */
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_second);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if
(st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	/* rule numbers: htonl(-1) marks "no rule" on the wire */
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);

}
#endif

/*
 * Import a wire-format state received from a peer (or via ioctl when
 * PFSYNC_SI_IOCTL is set in flags) into the local state table.
 * Returns 0 on success or if the state is deliberately skipped, EINVAL
 * for malformed input, ENOMEM on allocation failure.
 */
int
pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
{
	struct pf_state	*st = NULL;
	struct pf_state_key *skw = NULL, *sks = NULL;
	struct pf_rule *r = NULL;
	struct pfi_kif	*kif;
	int pool_flags;
	int error;

	PF_LOCK_ASSERT();

	/* a zero creator id can never be valid */
#ifdef __FreeBSD__
	if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
#else
	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
#endif
		printf("pfsync_state_import: invalid creator id:"
		    " %08x\n", ntohl(sp->creatorid));
		return (EINVAL);
	}

	if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync_state_import: "
			    "unknown interface: %s\n", sp->ifname);
		if (flags & PFSYNC_SI_IOCTL)
			return (EINVAL);
		return (0);	/* skip this state */
	}

	/*
	 * If the ruleset checksums match or the state is coming from the ioctl,
	 * it's safe to associate the state with the rule of that number.
749 */ 750 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && 751 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) < 752 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) 753 r = pf_main_ruleset.rules[ 754 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)]; 755 else 756#ifdef __FreeBSD__ 757 r = &V_pf_default_rule; 758#else 759 r = &pf_default_rule; 760#endif 761 762 if ((r->max_states && r->states_cur >= r->max_states)) 763 goto cleanup; 764 765#ifdef __FreeBSD__ 766 if (flags & PFSYNC_SI_IOCTL) 767 pool_flags = PR_WAITOK | PR_ZERO; 768 else 769 pool_flags = PR_NOWAIT | PR_ZERO; 770 771 if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL) 772 goto cleanup; 773#else 774 if (flags & PFSYNC_SI_IOCTL) 775 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO; 776 else 777 pool_flags = PR_LIMITFAIL | PR_ZERO; 778 779 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL) 780 goto cleanup; 781#endif 782 783 if ((skw = pf_alloc_state_key(pool_flags)) == NULL) 784 goto cleanup; 785 786 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0], 787 &sp->key[PF_SK_STACK].addr[0], sp->af) || 788 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1], 789 &sp->key[PF_SK_STACK].addr[1], sp->af) || 790 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] || 791 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) { 792 if ((sks = pf_alloc_state_key(pool_flags)) == NULL) 793 goto cleanup; 794 } else 795 sks = skw; 796 797 /* allocate memory for scrub info */ 798 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) || 799 pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) 800 goto cleanup; 801 802 /* copy to state key(s) */ 803 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0]; 804 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1]; 805 skw->port[0] = sp->key[PF_SK_WIRE].port[0]; 806 skw->port[1] = sp->key[PF_SK_WIRE].port[1]; 807 skw->proto = sp->proto; 808 skw->af = sp->af; 809 if (sks != skw) { 810 sks->addr[0] = sp->key[PF_SK_STACK].addr[0]; 811 sks->addr[1] = sp->key[PF_SK_STACK].addr[1]; 
812 sks->port[0] = sp->key[PF_SK_STACK].port[0]; 813 sks->port[1] = sp->key[PF_SK_STACK].port[1]; 814 sks->proto = sp->proto; 815 sks->af = sp->af; 816 } 817 818 /* copy to state */ 819 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr)); 820 st->creation = time_uptime - ntohl(sp->creation); 821 st->expire = time_second; 822 if (sp->expire) { 823 /* XXX No adaptive scaling. */ 824 st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire); 825 } 826 827 st->expire = ntohl(sp->expire) + time_second; 828 st->direction = sp->direction; 829 st->log = sp->log; 830 st->timeout = sp->timeout; 831 st->state_flags = sp->state_flags; 832 833 bcopy(sp->id, &st->id, sizeof(st->id)); 834 st->creatorid = sp->creatorid; 835 pf_state_peer_ntoh(&sp->src, &st->src); 836 pf_state_peer_ntoh(&sp->dst, &st->dst); 837 838 st->rule.ptr = r; 839 st->nat_rule.ptr = NULL; 840 st->anchor.ptr = NULL; 841 st->rt_kif = NULL; 842 843 st->pfsync_time = time_uptime; 844 st->sync_state = PFSYNC_S_NONE; 845 846 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ 847 r->states_cur++; 848 r->states_tot++; 849 850 if (!ISSET(flags, PFSYNC_SI_IOCTL)) 851 SET(st->state_flags, PFSTATE_NOSYNC); 852 853 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) { 854 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */ 855 r->states_cur--; 856 goto cleanup_state; 857 } 858 859 if (!ISSET(flags, PFSYNC_SI_IOCTL)) { 860 CLR(st->state_flags, PFSTATE_NOSYNC); 861 if (ISSET(st->state_flags, PFSTATE_ACK)) { 862 pfsync_q_ins(st, PFSYNC_S_IACK); 863#ifdef __FreeBSD__ 864 pfsync_sendout(); 865#else 866 schednetisr(NETISR_PFSYNC); 867#endif 868 } 869 } 870 CLR(st->state_flags, PFSTATE_ACK); 871 872 return (0); 873 874cleanup: 875 error = ENOMEM; 876 if (skw == sks) 877 sks = NULL; 878#ifdef __FreeBSD__ 879 if (skw != NULL) 880 pool_put(&V_pf_state_key_pl, skw); 881 if (sks != NULL) 882 pool_put(&V_pf_state_key_pl, sks); 883#else 884 if (skw != NULL) 885 pool_put(&pf_state_key_pl, skw); 886 if (sks 
!= NULL)
		pool_put(&pf_state_key_pl, sks);
#endif

cleanup_state:	/* pf_state_insert frees the state keys */
	if (st) {
#ifdef __FreeBSD__
		if (st->dst.scrub)
			pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
		if (st->src.scrub)
			pool_put(&V_pf_state_scrub_pl, st->src.scrub);
		pool_put(&V_pf_state_pl, st);
#else
		if (st->dst.scrub)
			pool_put(&pf_state_scrub_pl, st->dst.scrub);
		if (st->src.scrub)
			pool_put(&pf_state_scrub_pl, st->src.scrub);
		pool_put(&pf_state_pl, st);
#endif
	}
	return (error);
}

/*
 * Input handler for IPPROTO_PFSYNC packets: validate the packet
 * (interface, TTL, header, version), then walk the subheaders and
 * dispatch each action to its pfsync_acts[] handler.  Handlers return
 * the number of bytes they consumed, or -1 if they took ownership of
 * (or freed) the mbuf.
 */
void
#ifdef __FreeBSD__
pfsync_input(struct mbuf *m, __unused int off)
#else
pfsync_input(struct mbuf *m, ...)
#endif
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_pkt pkt;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;

	int offset;
	int rv;

	V_pfsyncstats.pfsyncs_ipackets++;

	/* verify that we have a sync interface configured */
#ifdef __FreeBSD__
	if (!sc || !sc->sc_sync_if || !V_pf_status.running)
#else
	if (!sc || !sc->sc_sync_if || !pf_status.running)
#endif
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
		goto done;
	}

#ifdef __FreeBSD__
	sc->sc_ifp->if_ipackets++;
	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
#else
	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;
#endif
	/* verify that the IP TTL is 255.
 */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		/* TTL must be untouched, i.e. the peer is on the same link */
		V_pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			/* m_pullup freed the chain on failure */
			V_pfsyncstats.pfsyncs_hdrops++;
			return;
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
		goto done;
	}

#if 0
	if (pfsync_input_hmac(m, offset) != 0) {
		/* XXX stats */
		goto done;
	}
#endif

	/* Cheaper to grab this now than having to mess with mbufs later */
	pkt.ip = ip;
	pkt.src = ip->ip_src;
	pkt.flags = 0;

	/* a matching ruleset checksum lets handlers trust rule numbers */
#ifdef __FreeBSD__
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
#else
	if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
#endif
		pkt.flags |= PFSYNC_SI_CKSUM;

	offset += sizeof(*ph);
	/*
	 * Walk the subheaders; the loop is terminated by the EOF action
	 * handler (or any handler) returning -1.
	 */
	for (;;) {
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);

		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
			goto done;
		}

		rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
		    ntohs(subh.count));
		if (rv == -1)
			return;

		offset += rv;
	}

done:
	m_freem(m);
}

/*
 * PFSYNC_ACT_CLR handler: the peer asks us to clear states, either all
 * states from a given creator id or only those on a named interface.
 * Returns the number of bytes consumed, or -1 on a short packet.
 */
int
pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_clr *clr;
	struct mbuf *mp;
	int len = sizeof(*clr) * count;
	int i, offp;

	struct pf_state *st, *nexts;
	struct pf_state_key *sk, *nextsk;
	struct pf_state_item *si;
	u_int32_t creatorid;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	clr = (struct pfsync_clr *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		if (clr[i].ifname[0] == '\0') {
			/* no interface given: clear by creator id only */
#ifdef __FreeBSD__
			for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
#else
			for (st = RB_MIN(pf_state_tree_id, &tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
#endif
				if (st->creatorid == creatorid) {
					/* don't echo the removal back */
					SET(st->state_flags, PFSTATE_NOSYNC);
					pf_unlink_state(st);
				}
			}
		} else {
			if (pfi_kif_get(clr[i].ifname) == NULL)
				continue;

			/* XXX correct? */
#ifdef __FreeBSD__
			for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
#else
			for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
#endif
			    sk; sk = nextsk) {
				nextsk = RB_NEXT(pf_state_tree,
#ifdef __FreeBSD__
				    &V_pf_statetbl, sk);
#else
				    &pf_statetbl, sk);
#endif
				TAILQ_FOREACH(si, &sk->states, entry) {
					if (si->s->creatorid == creatorid) {
						SET(si->s->state_flags,
						    PFSTATE_NOSYNC);
						pf_unlink_state(si->s);
					}
				}
			}
		}
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * PFSYNC_ACT_INS handler: validate and import a batch of full states
 * sent by the peer.  Returns bytes consumed, or -1 on a short packet.
 */
int
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;
	int i, offp;

	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >=
PFTM_MAX || 1124 sp->src.state > PF_TCPS_PROXY_DST || 1125 sp->dst.state > PF_TCPS_PROXY_DST || 1126 sp->direction > PF_OUT || 1127 (sp->af != AF_INET && sp->af != AF_INET6)) { 1128#ifdef __FreeBSD__ 1129 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1130#else 1131 if (pf_status.debug >= PF_DEBUG_MISC) { 1132#endif 1133 printf("pfsync_input: PFSYNC5_ACT_INS: " 1134 "invalid value\n"); 1135 } 1136 V_pfsyncstats.pfsyncs_badval++; 1137 continue; 1138 } 1139 1140 if (pfsync_state_import(sp, pkt->flags) == ENOMEM) { 1141 /* drop out, but process the rest of the actions */ 1142 break; 1143 } 1144 } 1145#ifdef __FreeBSD__ 1146 PF_UNLOCK(); 1147#endif 1148 splx(s); 1149 1150 return (len); 1151} 1152 1153int 1154pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 1155{ 1156 struct pfsync_ins_ack *ia, *iaa; 1157 struct pf_state_cmp id_key; 1158 struct pf_state *st; 1159 1160 struct mbuf *mp; 1161 int len = count * sizeof(*ia); 1162 int offp, i; 1163 int s; 1164 1165 mp = m_pulldown(m, offset, len, &offp); 1166 if (mp == NULL) { 1167 V_pfsyncstats.pfsyncs_badlen++; 1168 return (-1); 1169 } 1170 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp); 1171 1172 s = splsoftnet(); 1173#ifdef __FreeBSD__ 1174 PF_LOCK(); 1175#endif 1176 for (i = 0; i < count; i++) { 1177 ia = &iaa[i]; 1178 1179 bcopy(&ia->id, &id_key.id, sizeof(id_key.id)); 1180 id_key.creatorid = ia->creatorid; 1181 1182 st = pf_find_state_byid(&id_key); 1183 if (st == NULL) 1184 continue; 1185 1186 if (ISSET(st->state_flags, PFSTATE_ACK)) 1187 pfsync_deferred(st, 0); 1188 } 1189#ifdef __FreeBSD__ 1190 PF_UNLOCK(); 1191#endif 1192 splx(s); 1193 /* 1194 * XXX this is not yet implemented, but we know the size of the 1195 * message so we can skip it. 
	 */

	return (count * sizeof(struct pfsync_ins_ack));
}

/*
 * Sanity-check a TCP state update from a peer against our local copy.
 * Returns 0 when the update may be applied, or a non-zero "sfail"
 * reason code when it is stale (the callers log the code).  Code 7 is
 * special: the src half of the update was already imported, so the
 * caller replies with its fuller state instead of just ignoring.
 */
int
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
	int sfail = 0;

	/*
	 * The state should never go backwards except
	 * for syn-proxy states. Neither should the
	 * sequence window slide backwards.
	 */
	if (st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC))
		sfail = 1;
	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
		sfail = 3;
	else if (st->dst.state > dst->state) {
		/* There might still be useful
		 * information about the src state here,
		 * so import that part of the update,
		 * then "fail" so we send the updated
		 * state back to the peer who is missing
		 * our what we know. */
		pf_state_peer_ntoh(src, &st->src);
		/* XXX do anything with timeouts? */
		sfail = 7;
	} else if (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
		sfail = 4;

	return (sfail);
}

/*
 * Handle a PFSYNC_ACT_UPD message: apply "count" full state updates.
 * Unknown states are imported as new; stale updates are rejected and
 * answered with our own (newer) copy of the state.  Returns bytes
 * consumed or -1 on a short packet.
 */
int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int sfail;

	struct mbuf *mp;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		/* 1 == drop the deferred packet; the update supersedes it. */
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go
			 * forwards
			 */
			if (st->src.state > sp->src.state)
				sfail = 5;
			else if (st->dst.state > sp->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: %s stale update (%d)"
				    " id: %016llx creatorid: %08x\n",
				    (sfail < 7 ? "ignoring" : "partial"),
				    sfail, betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			/* Push our fresher copy back to the peer. */
			pfsync_update_state(st);
#ifdef __FreeBSD__
			pfsync_sendout();
#else
			schednetisr(NETISR_PFSYNC);
#endif
			continue;
		}
		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
		pf_state_peer_ntoh(&sp->src, &st->src);
		pf_state_peer_ntoh(&sp->dst, &st->dst);
		/* Wire expire is relative; rebase onto local wall clock. */
		st->expire = ntohl(sp->expire) + time_second;
		st->timeout = sp->timeout;
		st->pfsync_time = time_uptime;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * Handle a PFSYNC_ACT_UPD_C message: apply "count" compressed updates
 * (id + peers only).  If we do not know the state we ask the peer for
 * the full version rather than importing.  Returns bytes consumed or
 * -1 on a short packet.
 */
int
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_c *ua, *up;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pf_state *st;

	int len = count * sizeof(*up);
	int sfail;

	struct mbuf *mp;
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = up->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			pfsync_request_update(id_key.creatorid, id_key.id);
			continue;
		}

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go forwards
			 */
			if (st->src.state > up->src.state)
				sfail = 5;
			else if (st->dst.state > up->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: ignoring stale update "
				    "(%d) id: %016llx "
				    "creatorid: %08x\n", sfail,
				    betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
#ifdef __FreeBSD__
			pfsync_sendout();
#else
			schednetisr(NETISR_PFSYNC);
#endif
			continue;
		}
		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
		pf_state_peer_ntoh(&up->src, &st->src);
		pf_state_peer_ntoh(&up->dst, &st->dst);
		st->expire = ntohl(up->expire) + time_second;
		st->timeout = up->timeout;
		st->pfsync_time = time_uptime;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * Handle a PFSYNC_ACT_UPD_REQ message: the peer is asking us to resend
 * states.  An all-zero id/creatorid requests a full bulk update;
 * otherwise we queue the named state for retransmission.  Returns
 * bytes consumed or -1 on a short packet.
 */
int
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_req *ur, *ura;
	struct mbuf *mp;
	int len = count * sizeof(*ur);
	int i, offp;

	struct pf_state_cmp id_key;
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ur = &ura[i];

		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ur->creatorid;

		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			/* Never advertise states marked no-sync. */
			if (ISSET(st->state_flags, PFSTATE_NOSYNC))
				continue;

			PF_LOCK();
			pfsync_update_state_req(st);
			PF_UNLOCK();
		}
	}

	return (len);
}

/*
 * Handle a PFSYNC_ACT_DEL message: unlink "count" states named by full
 * pfsync_state entries.  NOSYNC suppresses re-announcing the deletion.
 * Returns bytes consumed or -1 on a short packet.
 */
int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * Handle a PFSYNC_ACT_DEL_C message: same as pfsync_in_del but the
 * states are named by compressed (id + creatorid) entries.
 */
int
pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_del_c *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * Handle a PFSYNC_ACT_BUS (bulk update status) message.  START re-arms
 * the bulk-failure timeout scaled to how long the full table should
 * take to arrive; END validates the peer's timestamp against when we
 * sent the request and, when valid, clears the CARP demotion taken
 * while we were out of sync.
 */
int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)
		return (len);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
		/* Timeout = 4s + time to stream the whole state table
		 * at one full pfsync packet per state-table chunk. */
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_sync_if->if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)),
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)));
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			timeout_del(&sc->sc_bulkfail_tmo);
#ifdef __FreeBSD__
			if (!sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->pfsync_sync_ok = 1;
#else
#if NCARP > 0
			if (!pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, -1);
#endif
			pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}

	return (len);
}

/*
 * Handle a PFSYNC_ACT_TDB message: apply IPsec TDB replay/byte counter
 * updates.  Compiled to a length-only skip when IPSEC is not built in.
 */
int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
#endif

	return (len);
}

#if defined(IPSEC)
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;
	int s;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	s = spltdb();
	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			splx(s);
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	splx(s);
	return;

bad:
#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
#endif


/*
 * Handle the PFSYNC_ACT_EOF subheader: it must be the last thing in
 * the packet.  Always frees the mbuf and returns -1 so the input loop
 * terminates.
 */
int
pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
		V_pfsyncstats.pfsyncs_badact++;

	/* we're done. free and let the caller return */
	m_freem(m);
	return (-1);
}

/*
 * Handler for unknown/invalid action codes: count it, drop the packet,
 * and stop processing (-1).
 */
int
pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	V_pfsyncstats.pfsyncs_badact++;

	m_freem(m);
	return (-1);
}

/*
 * if_output for the pfsync pseudo-interface.  Nothing may be queued
 * directly on it; silently discard.
 */
int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
#ifdef __FreeBSD__
	struct route *rt)
#else
	struct rtentry *rt)
#endif
{
	m_freem(m);
	return (0);
}

/*
 * ioctl handler for the pfsync interface.  SIOCSETPFSYNC reconfigures
 * the sync interface/peer: it (re)joins the pfsync multicast group,
 * rebuilds the IP header template used for output, and kicks off a
 * bulk update request (demoting CARP until the table is in sync).
 * Note the PF_LOCK drop/reacquire dance around functions that may
 * sleep or take other locks (ifunit, in_addmulti, in_delmulti).
 */
/* ARGSUSED */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#ifndef __FreeBSD__
	struct proc *p = curproc;
#endif
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	struct ip *ip;
	int s, error;

	switch (cmd) {
#if 0
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
#endif
	case SIOCSIFFLAGS:
#ifdef __FreeBSD__
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
		else
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#else
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu <= PFSYNC_MINPKT)
			return (EINVAL);
		if (ifr->ifr_mtu > MCLBYTES) /* XXX could be bigger */
			ifr->ifr_mtu = MCLBYTES;
		if (ifr->ifr_mtu < ifp->if_mtu) {
			/* Shrinking: flush what we have buffered first. */
			s = splnet();
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			pfsync_sendout();
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			splx(s);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
#ifdef __FreeBSD__
		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
#else
		if ((error = suser(p, p->p_acflag)) != 0)
#endif
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
#ifdef __FreeBSD__
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
#endif
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
#ifdef __FreeBSD__
		{
			PF_UNLOCK();
#endif
			return (EINVAL);
#ifdef __FreeBSD__
		}
#endif
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			/* Unconfigure: detach from the sync interface and
			 * leave the multicast group if we had joined. */
			sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
			break;
		}

#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		s = splnet();
		/* Flush buffered updates if the new path is narrower. */
#ifdef __FreeBSD__
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
#else
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
#endif
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout();
		sc->sc_sync_if = sifp;

		if (imo->imo_num_memberships > 0) {
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			imo->imo_multicast_ifp = NULL;
		}

		if (sc->sc_sync_if &&
#ifdef __FreeBSD__
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
#else
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
#endif
			struct in_addr addr;

			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
				PF_UNLOCK();
#endif
				splx(s);
				return (EADDRNOTAVAIL);
			}

#ifdef __FreeBSD__
			addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			addr.s_addr = INADDR_PFSYNC_GROUP;
#endif

#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;
				splx(s);
				return (ENOBUFS);
			}
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}

		/* Pre-build the IP header copied onto every output packet. */
		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
#ifdef __FreeBSD__
		ip->ip_off = IP_DF;
#else
		ip->ip_off = htons(IP_DF);
#endif
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
#ifdef __FreeBSD__
			if (sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(V_pfsync_carp_adj,
				    "pfsync bulk start");
			sc->pfsync_sync_ok = 0;
#else
#if NCARP > 0
			if (pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, 1);
#endif
			pfsync_sync_ok = 0;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: requesting bulk update\n");
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulk_fail, V_pfsyncif);
#else
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
			pfsync_request_update(0, 0);
		}
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		splx(s);

		break;

	default:
		return (ENOTTY);
	}

	return (0);
}

/*
 * Output serializers, one per queue type.  Each writes its wire
 * representation of "st" at "offset" into "m" and returns the number
 * of bytes written.
 */
int
pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);

	pfsync_state_export(sp, st);

	return (sizeof(*sp));
}

int
pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_ins_ack *iack =
	    (struct pfsync_ins_ack *)(m->m_data + offset);

	iack->id = st->id;
	iack->creatorid = st->creatorid;

	return (sizeof(*iack));
}

int
pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);

	up->id = st->id;
	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;

	/* Expiry is sent as seconds-remaining, clamped at zero. */
	up->expire = pf_state_expires(st);
	if (up->expire <= time_second)
		up->expire = htonl(0);
	else
		up->expire = htonl(up->expire - time_second);
	up->timeout = st->timeout;

	bzero(up->_pad, sizeof(up->_pad)); /* XXX */

	return (sizeof(*up));
}

int
pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);

	dp->id = st->id;
	dp->creatorid = st->creatorid;

	/* Once advertised as deleted, never sync this state again. */
	SET(st->state_flags, PFSTATE_NOSYNC);

	return (sizeof(*dp));
}

/*
 * Discard everything queued for transmission (all per-action queues,
 * pending update requests, and any custom "plus" region) and reset the
 * accumulated packet length to the empty-packet minimum.
 */
void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif
			st->sync_state = PFSYNC_S_NONE;
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);
	}

	sc->sc_plus = NULL;

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);

		TAILQ_INIT(&sc->sc_tdb_q);
	}
#endif

	sc->sc_len = PFSYNC_MINPKT;
}

/*
 * Serialize everything queued on the softc into a single pfsync packet
 * (IP header template + pfsync header + one subheader per non-empty
 * queue + EOF subheader) and hand it to the sync interface.  Called
 * with PF_LOCK held on FreeBSD, at IPL_NET on OpenBSD.
 */
void
pfsync_sendout(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
#if NBPFILTER > 0
#ifdef __FreeBSD__
	struct ifnet *ifp = sc->sc_ifp;
#else
	struct ifnet *ifp = &sc->sc_if;
#endif
#endif
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
#ifdef __FreeBSD__
	size_t pktlen;
	int dummy_error;
#endif
	int offset;
	int q, count = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_NET);
#endif

	/* Nothing accumulated beyond the empty-packet minimum: no-op. */
	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
		return;

#if NBPFILTER > 0
	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
#else
	if (sc->sc_sync_if == NULL) {
#endif
		/* No listener at all: throw the queued work away. */
		pfsync_drop(sc);
		return;
	}

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
#ifdef __FreeBSD__
		sc->sc_ifp->if_oerrors++;
#else
		sc->sc_if.if_oerrors++;
#endif
		V_pfsyncstats.pfsyncs_onomem++;
		pfsync_drop(sc);
		return;
	}

#ifdef __FreeBSD__
	pktlen = max_linkhdr + sc->sc_len;
	if (pktlen > MHLEN) {
		/* Find the right pool to allocate from. */
		/* XXX: This is ugly. */
		m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE :
		    pktlen <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
#else
	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
#endif
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
#ifdef __FreeBSD__
			sc->sc_ifp->if_oerrors++;
#else
			sc->sc_if.if_oerrors++;
#endif
			V_pfsyncstats.pfsyncs_onomem++;
			pfsync_drop(sc);
			return;
		}
	}
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

#ifdef __FreeBSD__
	ip->ip_len = m->m_pkthdr.len;
#else
	ip->ip_len = htons(m->m_pkthdr.len);
#endif
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
#ifdef __FreeBSD__
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#else
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#endif

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif

			/* Per-queue serializer advances the offset. */
			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		/* Subheader is filled in after the fact, once the
		 * entry count for this action is known. */
		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			pool_put(&sc->sc_pool, ur);

			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

		sc->sc_plus = NULL;
	}

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);

			count++;
		}
		TAILQ_INIT(&sc->sc_tdb_q);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);
	}
#endif

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);

	/* XXX write checksum in EOF here */

	/* we're done, let's put it on the wire */
#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/* Tap the packet without the IP header, then restore. */
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
#ifdef __FreeBSD__
		BPF_MTAP(ifp, m);
#else
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}

	if (sc->sc_sync_if == NULL) {
		/* BPF-only mode: tap done, nothing to transmit. */
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}
#endif

#ifdef __FreeBSD__
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	/* Defer the actual ip_output() to the pfsync netisr. */
	IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
	schednetisr(NETISR_PFSYNC);
#else
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;

	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
	else
		pfsyncstats.pfsyncs_oerrors++;

	/* start again */
	sc->sc_len = PFSYNC_MINPKT;
#endif
}

/*
 * pf hook: a new state was created locally.  Queue it for an INS
 * announcement unless the rule or state is marked no-sync.  States
 * created while acknowledging a peer's insert (PFSTATE_ACK) are
 * flushed immediately.
 */
void
pfsync_insert_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
		return;
	}

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
		return;

#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif
#endif

	/* First item queued on an empty packet: arm the flush timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	pfsync_q_ins(st, PFSYNC_S_INS);

	if (ISSET(st->state_flags, PFSTATE_ACK))
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	else
		st->sync_updates = 0;
}

/* Deferral timeout in ticks; tunable. */
int defer = 10;

/*
 * Hold back the packet that created "st" until the peer acknowledges
 * the state insert (or the deferral times out).  Returns 1 if the
 * mbuf was taken, 0 if the caller must transmit it itself.
 */
int
pfsync_defer(struct pf_state *st, struct mbuf *m)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	/* Bound the number of outstanding deferrals; release the oldest. */
	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = pool_get(&sc->sc_pool, M_NOWAIT);
	if (pd == NULL)
		return (0);
	sc->sc_deferred++;

	/* Tag the packet so the firewall does not inspect it again. */
#ifdef __FreeBSD__
	m->m_flags |= M_SKIP_FIREWALL;
#else
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
#endif
	SET(st->state_flags, PFSTATE_ACK);

	pd->pd_st = st;
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
#ifdef __FreeBSD__
	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
	    pd);
#else
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);
#endif

	return (1);
}

/*
 * Release a deferral: unlink it, clear PFSTATE_ACK, and either drop
 * the held packet (drop != 0) or transmit it now via ip_output().
 */
void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;

	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo); /* bah */
	if (drop)
		m_freem(pd->pd_m);
	else {
		s = splnet();
#ifdef __FreeBSD__
		/* XXX: use pf_defered?! */
		/* Drop the pf lock across ip_output() to avoid recursion. */
		PF_UNLOCK();
#endif
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	pool_put(&sc->sc_pool, pd);
}

/*
 * Deferral timer expired: the peer did not ack in time, so send the
 * held packet anyway.
 */
void
pfsync_defer_tmo(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	/*
	 * pd is only needed for CURVNET_SET(); without VIMAGE that macro
	 * expands to nothing and the local would be unused.
	 */
	struct pfsync_deferral *pd = arg;
#endif
	int s;

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
	PF_LOCK();
#endif
	pfsync_undefer(arg, 0);
#ifdef __FreeBSD__
	PF_UNLOCK();
	CURVNET_RESTORE();
#endif
	splx(s);
}

/*
 * Find and release the deferral belonging to state st.  Panics if the
 * state is not on the list (callers check PFSTATE_ACK first).
 */
void
pfsync_deferred(struct pf_state *st, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			pfsync_undefer(pd, drop);
			return;
		}
	}

	panic("pfsync_send_deferred: unable to find deferred state");
}

u_int pfsync_upds = 0;	/* statistic: immediate update transmissions */

/*
 * Queue an update for a state that changed.  Compressed updates are
 * used, and TCP state changes are batched up to sc_maxupdates before
 * forcing a transmission.
 */
void
pfsync_update_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int sync = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	/* First message in an empty packet: arm the one-second flush timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case
 PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = 1;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		break;

	default:
		panic("pfsync_update_state: unexpected sync state %d",
		    st->sync_state);
	}

	/* Push immediately when batched enough or the state is very young. */
	if (sync || (time_uptime - st->pfsync_time) < 2) {
		pfsync_upds++;
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	}
}

/*
 * Ask the peer(s) to resend the state identified by creatorid/id;
 * (0, 0) requests a full bulk update.
 */
void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);
	int s;

	/* NOTE(review): unconditional here, unlike the #ifdef'ed asserts
	 * in the other entry points — confirm against the OpenBSD code. */
	PF_LOCK_ASSERT();

	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
	 */

	item = pool_get(&sc->sc_pool, PR_NOWAIT);
	if (item == NULL) {
		/* XXX stats */
		return;
	}

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	/* A newly non-empty queue also needs its subheader accounted. */
	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_sync_if->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);

		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	sc->sc_len += nlen;

#ifdef __FreeBSD__
	pfsync_sendout();
#else
	schednetisr(NETISR_PFSYNC);
#endif
}

/*
 * Queue a full (uncompressed) update for a state, typically in answer
 * to a peer's update request during a bulk transfer.
 */
void
pfsync_update_state_req(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	PF_LOCK_ASSERT();

	if (sc == NULL)
		panic("pfsync_update_state_req: nonexistant instance");

	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD);
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
		return;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL:
		/* we're already handling it */
		return;

	default:
		panic("pfsync_update_state_req: unexpected sync state %d",
		    st->sync_state);
	}
}

/*
 * Queue a delete notification for a state being removed.
 */
void
pfsync_delete_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef
 __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 1);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	/* First message in an empty packet: arm the one-second flush timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* we never got to tell the world so just forget about it */
		pfsync_q_del(st);
		return;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH to putting it on the del list */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL);
		return;

	default:
		panic("pfsync_delete_state: unexpected sync state %d",
		    st->sync_state);
	}
}

/*
 * Broadcast a "clear states" message for the given creator id and
 * interface name.
 */
void
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	pfsync_send_plus(&r, sizeof(r));
}

/*
 * Put state st on message queue q, accounting for the message length
 * (plus a subheader when the queue becomes non-empty).  Flushes the
 * current packet first if the addition would exceed the MTU.
 */
void
pfsync_q_ins(struct pf_state *st, int q)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = pfsync_qs[q].len;
	int s;

	PF_LOCK_ASSERT();

#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif

#if 1 || defined(PFSYNC_DEBUG)
	if (sc->sc_len < PFSYNC_MINPKT)
#ifdef __FreeBSD__
		panic("pfsync pkt len is too low %zu", sc->sc_len);
#else
		panic("pfsync pkt len is too low %d", sc->sc_len);
#endif
#endif
	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);

		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	sc->sc_len += nlen;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
	st->sync_state = q;
}

/*
 * Remove state st from its message queue and give back the accounted
 * length (plus the subheader if the queue becomes empty).
 */
void
pfsync_q_del(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int q = st->sync_state;

#ifdef __FreeBSD__
	KASSERT(st->sync_state != PFSYNC_S_NONE,
		("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state != PFSYNC_S_NONE);
#endif

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}

#ifdef notyet
/*
 * Queue a replay-counter update for an IPsec tdb (compiled out — see
 * the enclosing #ifdef notyet).
 */
void
pfsync_update_tdb(struct tdb *t, int output)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = sizeof(struct pfsync_tdb);
	int s;

	if (sc == NULL)
		return;

	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
		if
 (TAILQ_EMPTY(&sc->sc_tdb_q))
			nlen += sizeof(struct pfsync_subheader);

		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
			s = splnet();
			PF_LOCK();
			pfsync_sendout();
			PF_UNLOCK();
			splx(s);

			nlen = sizeof(struct pfsync_subheader) +
			    sizeof(struct pfsync_tdb);
		}

		sc->sc_len += nlen;
		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
		SET(t->tdb_flags, TDBF_PFSYNC);
		t->tdb_updates = 0;
	} else {
		/* Already queued; force a flush after sc_maxupdates changes. */
		if (++t->tdb_updates >= sc->sc_maxupdates)
			schednetisr(NETISR_PFSYNC);
	}

	if (output)
		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
	else
		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
}

/*
 * Take a tdb off the sync queue, e.g. when it is being destroyed.
 */
void
pfsync_delete_tdb(struct tdb *t)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
		return;

	sc->sc_len -= sizeof(struct pfsync_tdb);
	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
	CLR(t->tdb_flags, TDBF_PFSYNC);

	if (TAILQ_EMPTY(&sc->sc_tdb_q))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}

/*
 * Serialize one tdb into the outgoing pfsync packet at the given
 * offset.  Returns the number of bytes written.
 */
int
pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
{
	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);

	bzero(ut, sizeof(*ut));
	ut->spi = t->tdb_spi;
	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
	/*
	 * When a failover happens, the master's rpl is probably above
	 * what we see here (we may be up to a second late), so
	 * increase it a bit for outbound tdbs to manage most such
	 * situations.
	 *
	 * For now, just add an offset that is likely to be larger
	 * than the number of packets we can see in one second. The RFC
	 * just says the next packet must have a higher seq value.
	 *
	 * XXX What is a good algorithm for this? We could use
	 * a rate-determined increase, but to know it, we would have
	 * to extend struct tdb.
	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
	 * will soon be replaced anyway. For now, just don't handle
	 * this edge case.
	 */
#define RPL_INCR 16384
	ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
	    RPL_INCR : 0));
	ut->cur_bytes = htobe64(t->tdb_cur_bytes);
	ut->sproto = t->tdb_sproto;

	return (sizeof(*ut));
}
#endif

/*
 * A peer asked for a bulk update: walk the whole state list and resend
 * everything, bracketed by bulk-update-status (BUS) messages.
 */
void
pfsync_bulk_start(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync: received bulk update request\n");

#ifdef __FreeBSD__
	PF_LOCK();
	if (TAILQ_EMPTY(&V_state_list))
#else
	if (TAILQ_EMPTY(&state_list))
#endif
		pfsync_bulk_status(PFSYNC_BUS_END);
	else {
		sc->sc_ureq_received = time_uptime;
		/* Resume an in-progress walk, otherwise start from the top. */
		if (sc->sc_bulk_next == NULL)
#ifdef __FreeBSD__
			sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
#else
			sc->sc_bulk_next = TAILQ_FIRST(&state_list);
#endif
		sc->sc_bulk_last = sc->sc_bulk_next;

		pfsync_bulk_status(PFSYNC_BUS_START);
		callout_reset(&sc->sc_bulk_tmo, 1,
		    pfsync_bulk_update, sc);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
}

/*
 * Timer-driven worker that sends the bulk update one MTU-sized packet
 * at a time, rescheduling itself until the walk wraps back around to
 * sc_bulk_last.
 */
void
pfsync_bulk_update(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pf_state *st = sc->sc_bulk_next;
	int i = 0;
	int s;

	PF_LOCK_ASSERT();

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif
	for (;;) {
		/* Skip states already queued or updated since the request. */
		if (st->sync_state == PFSYNC_S_NONE &&
		    st->timeout < PFTM_MAX &&
		    st->pfsync_time <= sc->sc_ureq_received) {
			pfsync_update_state_req(st);
			i++;
		}

		st = TAILQ_NEXT(st, entry_list);
		/* The state list is walked circularly. */
		if (st == NULL)
#ifdef __FreeBSD__
			st = TAILQ_FIRST(&V_state_list);
#else
			st = TAILQ_FIRST(&state_list);
#endif

		if (st == sc->sc_bulk_last) {
			/* we're done */
			sc->sc_bulk_next = NULL;
			sc->sc_bulk_last = NULL;
			pfsync_bulk_status(PFSYNC_BUS_END);
			break;
		}

#ifdef __FreeBSD__
		if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
#else
		if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
#endif
		    sizeof(struct pfsync_state)) {
			/* we've filled a packet */
			sc->sc_bulk_next = st;
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulk_tmo, 1,
			    pfsync_bulk_update, sc);
#else
			timeout_add(&sc->sc_bulk_tmo, 1);
#endif
			break;
		}
	}

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
	splx(s);
}

/*
 * Send a bulk-update-status (BUS) message announcing the start or end
 * of a bulk transfer.
 */
void
pfsync_bulk_status(u_int8_t status)
{
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	PF_LOCK_ASSERT();

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);

#ifdef __FreeBSD__
	r.bus.creatorid = V_pf_status.hostid;
#else
	r.bus.creatorid = pf_status.hostid;
#endif
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	pfsync_send_plus(&r, sizeof(r));
}

/*
 * Our own bulk request went unanswered.  Retry up to
 * PFSYNC_MAX_BULKTRIES times, then give up and behave as if the
 * transfer succeeded (undoing the carp demotion).
 */
void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
		PF_LOCK();
		pfsync_request_update(0, 0);
		PF_UNLOCK();
	} else {
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
#ifdef __FreeBSD__
		if (!sc->pfsync_sync_ok && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->pfsync_sync_ok = 1;
#else
#if NCARP > 0
		if (!pfsync_sync_ok)
			carp_group_demote_adj(&sc->sc_if, -1);
#endif
		pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: failed to receive bulk update\n");
	}

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}

/*
 * Append an out-of-band chunk (e.g. a clr or bus message) to the
 * current packet and transmit it immediately.
 */
void
pfsync_send_plus(void *plus, size_t pluslen)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

	PF_LOCK_ASSERT();

	/* Flush first if the extra data would not fit within the MTU. */
#ifdef __FreeBSD__
	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);
	}

	sc->sc_plus = plus;
	sc->sc_len += (sc->sc_pluslen = pluslen);

	s = splnet();
	pfsync_sendout();
	splx(s);
}

/*
 * Is pfsync configured and running?
 */
int
pfsync_up(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
#else
	if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
#endif
		return (0);

	return (1);
}

/*
 * Is state st still referenced by pfsync (queued, or part of an
 * in-progress bulk transfer)?
 */
int
pfsync_state_in_use(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL)
		return
 (0);

	if (st->sync_state != PFSYNC_S_NONE ||
	    st == sc->sc_bulk_next ||
	    st == sc->sc_bulk_last)
		return (1);

	return (0);
}

u_int pfsync_ints;	/* statistic: software interrupt invocations */
u_int pfsync_tmos;	/* statistic: flush timer expirations */

/*
 * Flush timer: transmit whatever has accumulated in the packet.
 */
void
pfsync_timeout(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	/*
	 * sc is only needed for CURVNET_SET(); without VIMAGE that macro
	 * expands to nothing and the local would be unused.
	 */
	struct pfsync_softc *sc = arg;
#endif
	int s;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	pfsync_tmos++;

	s = splnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}

/* this is a softnet/netisr handler */
void
#ifdef __FreeBSD__
pfsyncintr(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct mbuf *m, *n;

	CURVNET_SET(sc->sc_ifp->if_vnet);
	pfsync_ints++;

	/* Drain the queue that pfsync_sendout() filled and transmit it. */
	IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);

	for (; m != NULL; m = n) {

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
		    == 0)
			V_pfsyncstats.pfsyncs_opackets++;
		else
			V_pfsyncstats.pfsyncs_oerrors++;
	}
	CURVNET_RESTORE();
}
#else
pfsyncintr(void)
{
	int s;

	pfsync_ints++;

	s = splnet();
	pfsync_sendout();
	splx(s);
}
#endif

/*
 * sysctl handler (OpenBSD interface); the body is currently compiled
 * out, so this always answers ENOPROTOOPT.
 */
int
pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

#ifdef notyet
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case PFSYNCCTL_STATS:
		if (newp != NULL)
			return (EPERM);
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
	}
#endif
	return (ENOPROTOOPT);
}

#ifdef __FreeBSD__
/*
 * ifnet departure event handler: drop our references if the
 * synchronisation interface disappears from under us.
 */
void
pfsync_ifdetach(void *arg, struct ifnet *ifp)
{
	struct pfsync_softc *sc = (struct pfsync_softc *)arg;
	struct ip_moptions *imo;

	if (sc == NULL || sc->sc_sync_if != ifp)
		return;		/* not for us; unlocked read */

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PF_LOCK();

	/* Deal with a member interface going away from under us. */
	sc->sc_sync_if = NULL;
	imo = &sc->sc_imo;
	if (imo->imo_num_memberships > 0) {
		KASSERT(imo->imo_num_memberships == 1,
		    ("%s: imo_num_memberships != 1", __func__));
		/*
		 * Our event handler is always called after protocol
		 * domains have been detached from the underlying ifnet.
		 * Do not call in_delmulti(); we held a single reference
		 * which the protocol domain has purged in in_purgemaddrs().
3370 */ 3371 PF_UNLOCK(); 3372 imo->imo_membership[--imo->imo_num_memberships] = NULL; 3373 PF_LOCK(); 3374 imo->imo_multicast_ifp = NULL; 3375 } 3376 3377 PF_UNLOCK(); 3378 3379 CURVNET_RESTORE(); 3380} 3381 3382static int 3383vnet_pfsync_init(const void *unused) 3384{ 3385 int error = 0; 3386 3387 pfsyncattach(0); 3388 3389 error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif, 3390 SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie); 3391 if (error) 3392 panic("%s: swi_add %d", __func__, error); 3393 3394 PF_LOCK(); 3395 pfsync_state_import_ptr = pfsync_state_import; 3396 pfsync_up_ptr = pfsync_up; 3397 pfsync_insert_state_ptr = pfsync_insert_state; 3398 pfsync_update_state_ptr = pfsync_update_state; 3399 pfsync_delete_state_ptr = pfsync_delete_state; 3400 pfsync_clear_states_ptr = pfsync_clear_states; 3401 pfsync_state_in_use_ptr = pfsync_state_in_use; 3402 pfsync_defer_ptr = pfsync_defer; 3403 PF_UNLOCK(); 3404 3405 return (0); 3406} 3407 3408static int 3409vnet_pfsync_uninit(const void *unused) 3410{ 3411 3412 swi_remove(pfsync_swi.pfsync_swi_cookie); 3413 3414 PF_LOCK(); 3415 pfsync_state_import_ptr = NULL; 3416 pfsync_up_ptr = NULL; 3417 pfsync_insert_state_ptr = NULL; 3418 pfsync_update_state_ptr = NULL; 3419 pfsync_delete_state_ptr = NULL; 3420 pfsync_clear_states_ptr = NULL; 3421 pfsync_state_in_use_ptr = NULL; 3422 pfsync_defer_ptr = NULL; 3423 PF_UNLOCK(); 3424 3425 if_clone_detach(&pfsync_cloner); 3426 3427 return (0); 3428} 3429 3430/* Define startup order. */ 3431#define PFSYNC_SYSINIT_ORDER SI_SUB_PROTO_IF 3432#define PFSYNC_MODEVENT_ORDER (SI_ORDER_FIRST) /* On boot slot in here. */ 3433#define PFSYNC_VNET_ORDER (PFSYNC_MODEVENT_ORDER + 2) /* Later still. */ 3434 3435/* 3436 * Starting up. 3437 * VNET_SYSINIT is called for each existing vnet and each new vnet. 3438 */ 3439VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER, 3440 vnet_pfsync_init, NULL); 3441 3442/* 3443 * Closing up shop. 
These are done in REVERSE ORDER, 3444 * Not called on reboot. 3445 * VNET_SYSUNINIT is called for each exiting vnet as it exits. 3446 */ 3447VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER, 3448 vnet_pfsync_uninit, NULL); 3449static int 3450pfsync_modevent(module_t mod, int type, void *data) 3451{ 3452 int error = 0; 3453 3454 switch (type) { 3455 case MOD_LOAD: 3456#ifndef __FreeBSD__ 3457 pfsyncattach(0); 3458#endif 3459 break; 3460 case MOD_UNLOAD: 3461#ifndef __FreeBSD__ 3462 if_clone_detach(&pfsync_cloner); 3463#endif 3464 break; 3465 default: 3466 error = EINVAL; 3467 break; 3468 } 3469 3470 return error; 3471} 3472 3473static moduledata_t pfsync_mod = { 3474 "pfsync", 3475 pfsync_modevent, 3476 0 3477}; 3478 3479#define PFSYNC_MODVER 1 3480 3481DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 3482MODULE_VERSION(pfsync, PFSYNC_MODVER); 3483MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER); 3484#endif /* __FreeBSD__ */ 3485