if_pfsync.c revision 229964
1/* $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ */ 2 3/* 4 * Copyright (c) 2002 Michael Shalayeff 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT, 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 26 * THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29/* 30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org> 31 * 32 * Permission to use, copy, modify, and distribute this software for any 33 * purpose with or without fee is hereby granted, provided that the above 34 * copyright notice and this permission notice appear in all copies. 35 * 36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 38 * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 43 */ 44 45/* 46 * Revisions picked from OpenBSD after revision 1.110 import: 47 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates 48 * 1.120, 1.175 - use monotonic time_uptime 49 * 1.122 - reduce number of updates for non-TCP sessions 50 * 1.128 - cleanups 51 * 1.170 - SIOCSIFMTU checks 52 */ 53 54#ifdef __FreeBSD__ 55#include "opt_inet.h" 56#include "opt_inet6.h" 57#include "opt_pf.h" 58 59#include <sys/cdefs.h> 60__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 229964 2012-01-11 14:24:03Z glebius $"); 61 62#define NBPFILTER 1 63#endif /* __FreeBSD__ */ 64 65#include <sys/param.h> 66#include <sys/kernel.h> 67#ifdef __FreeBSD__ 68#include <sys/bus.h> 69#include <sys/interrupt.h> 70#include <sys/priv.h> 71#endif 72#include <sys/proc.h> 73#include <sys/systm.h> 74#include <sys/time.h> 75#include <sys/mbuf.h> 76#include <sys/socket.h> 77#ifdef __FreeBSD__ 78#include <sys/endian.h> 79#include <sys/malloc.h> 80#include <sys/module.h> 81#include <sys/sockio.h> 82#include <sys/taskqueue.h> 83#include <sys/lock.h> 84#include <sys/mutex.h> 85#include <sys/protosw.h> 86#else 87#include <sys/ioctl.h> 88#include <sys/timeout.h> 89#endif 90#include <sys/sysctl.h> 91#ifndef __FreeBSD__ 92#include <sys/pool.h> 93#endif 94 95#include <net/if.h> 96#ifdef __FreeBSD__ 97#include <net/if_clone.h> 98#endif 99#include <net/if_types.h> 100#include <net/route.h> 101#include <net/bpf.h> 102#include <net/netisr.h> 103#ifdef __FreeBSD__ 104#include <net/vnet.h> 105#endif 106 107#include <netinet/in.h> 108#include <netinet/if_ether.h> 109#include <netinet/tcp.h> 110#include <netinet/tcp_seq.h> 111 112#ifdef INET 113#include 
<netinet/in_systm.h> 114#include <netinet/in_var.h> 115#include <netinet/ip.h> 116#include <netinet/ip_var.h> 117#endif 118 119#ifdef INET6 120#include <netinet6/nd6.h> 121#endif /* INET6 */ 122 123#ifdef __FreeBSD__ 124#include <netinet/ip_carp.h> 125#else 126#include "carp.h" 127#if NCARP > 0 128#include <netinet/ip_carp.h> 129#endif 130#endif 131 132#include <net/pfvar.h> 133#include <net/if_pfsync.h> 134 135#ifndef __FreeBSD__ 136#include "bpfilter.h" 137#include "pfsync.h" 138#endif 139 140#define PFSYNC_MINPKT ( \ 141 sizeof(struct ip) + \ 142 sizeof(struct pfsync_header) + \ 143 sizeof(struct pfsync_subheader) + \ 144 sizeof(struct pfsync_eof)) 145 146struct pfsync_pkt { 147 struct ip *ip; 148 struct in_addr src; 149 u_int8_t flags; 150}; 151 152int pfsync_input_hmac(struct mbuf *, int); 153 154int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *, 155 struct pfsync_state_peer *); 156 157int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int); 158int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int); 159int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int); 160int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int); 161int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int); 162int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int); 163int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int); 164int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int); 165int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int); 166int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int); 167int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int); 168 169int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int); 170 171int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = { 172 pfsync_in_clr, /* PFSYNC_ACT_CLR */ 173 pfsync_in_ins, /* PFSYNC_ACT_INS */ 174 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */ 175 pfsync_in_upd, /* PFSYNC_ACT_UPD */ 176 
pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */ 177 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */ 178 pfsync_in_del, /* PFSYNC_ACT_DEL */ 179 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */ 180 pfsync_in_error, /* PFSYNC_ACT_INS_F */ 181 pfsync_in_error, /* PFSYNC_ACT_DEL_F */ 182 pfsync_in_bus, /* PFSYNC_ACT_BUS */ 183 pfsync_in_tdb, /* PFSYNC_ACT_TDB */ 184 pfsync_in_eof /* PFSYNC_ACT_EOF */ 185}; 186 187struct pfsync_q { 188 int (*write)(struct pf_state *, struct mbuf *, int); 189 size_t len; 190 u_int8_t action; 191}; 192 193/* we have one of these for every PFSYNC_S_ */ 194int pfsync_out_state(struct pf_state *, struct mbuf *, int); 195int pfsync_out_iack(struct pf_state *, struct mbuf *, int); 196int pfsync_out_upd_c(struct pf_state *, struct mbuf *, int); 197int pfsync_out_del(struct pf_state *, struct mbuf *, int); 198 199struct pfsync_q pfsync_qs[] = { 200 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS }, 201 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK }, 202 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD }, 203 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C }, 204 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C } 205}; 206 207void pfsync_q_ins(struct pf_state *, int); 208void pfsync_q_del(struct pf_state *); 209 210struct pfsync_upd_req_item { 211 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry; 212 struct pfsync_upd_req ur_msg; 213}; 214TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item); 215 216struct pfsync_deferral { 217 TAILQ_ENTRY(pfsync_deferral) pd_entry; 218 struct pf_state *pd_st; 219 struct mbuf *pd_m; 220#ifdef __FreeBSD__ 221 struct callout pd_tmo; 222#else 223 struct timeout pd_tmo; 224#endif 225}; 226TAILQ_HEAD(pfsync_deferrals, pfsync_deferral); 227 228#define PFSYNC_PLSIZE MAX(sizeof(struct pfsync_upd_req_item), \ 229 sizeof(struct pfsync_deferral)) 230 231#ifdef notyet 232int pfsync_out_tdb(struct tdb *, struct mbuf *, int); 233#endif 234 235struct pfsync_softc { 236#ifdef 
__FreeBSD__ 237 struct ifnet *sc_ifp; 238#else 239 struct ifnet sc_if; 240#endif 241 struct ifnet *sc_sync_if; 242 243#ifdef __FreeBSD__ 244 uma_zone_t sc_pool; 245#else 246 struct pool sc_pool; 247#endif 248 249 struct ip_moptions sc_imo; 250 251 struct in_addr sc_sync_peer; 252 u_int8_t sc_maxupdates; 253#ifdef __FreeBSD__ 254 int pfsync_sync_ok; 255#endif 256 257 struct ip sc_template; 258 259 struct pf_state_queue sc_qs[PFSYNC_S_COUNT]; 260 size_t sc_len; 261 262 struct pfsync_upd_reqs sc_upd_req_list; 263 264 struct pfsync_deferrals sc_deferrals; 265 u_int sc_deferred; 266 267 void *sc_plus; 268 size_t sc_pluslen; 269 270 u_int32_t sc_ureq_sent; 271 int sc_bulk_tries; 272#ifdef __FreeBSD__ 273 struct callout sc_bulkfail_tmo; 274#else 275 struct timeout sc_bulkfail_tmo; 276#endif 277 278 u_int32_t sc_ureq_received; 279 struct pf_state *sc_bulk_next; 280 struct pf_state *sc_bulk_last; 281#ifdef __FreeBSD__ 282 struct callout sc_bulk_tmo; 283#else 284 struct timeout sc_bulk_tmo; 285#endif 286 287 TAILQ_HEAD(, tdb) sc_tdb_q; 288 289#ifdef __FreeBSD__ 290 struct callout sc_tmo; 291#else 292 struct timeout sc_tmo; 293#endif 294}; 295 296#ifdef __FreeBSD__ 297static MALLOC_DEFINE(M_PFSYNC, "pfsync", "pfsync data"); 298static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL; 299#define V_pfsyncif VNET(pfsyncif) 300static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL; 301#define V_pfsync_swi_cookie VNET(pfsync_swi_cookie) 302static VNET_DEFINE(struct pfsyncstats, pfsyncstats); 303#define V_pfsyncstats VNET(pfsyncstats) 304static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW; 305#define V_pfsync_carp_adj VNET(pfsync_carp_adj) 306 307static void pfsyncintr(void *); 308static int pfsync_multicast_setup(struct pfsync_softc *); 309static void pfsync_multicast_cleanup(struct pfsync_softc *); 310static int pfsync_init(void); 311static void pfsync_uninit(void); 312 313SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC"); 314SYSCTL_VNET_STRUCT(_net_pfsync, 
OID_AUTO, stats, CTLFLAG_RW, 315 &VNET_NAME(pfsyncstats), pfsyncstats, 316 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)"); 317SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW, 318 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment"); 319#else 320struct pfsync_softc *pfsyncif = NULL; 321struct pfsyncstats pfsyncstats; 322#define V_pfsyncstats pfsyncstats 323#endif 324 325void pfsyncattach(int); 326#ifdef __FreeBSD__ 327int pfsync_clone_create(struct if_clone *, int, caddr_t); 328void pfsync_clone_destroy(struct ifnet *); 329#else 330int pfsync_clone_create(struct if_clone *, int); 331int pfsync_clone_destroy(struct ifnet *); 332#endif 333int pfsync_alloc_scrub_memory(struct pfsync_state_peer *, 334 struct pf_state_peer *); 335void pfsync_update_net_tdb(struct pfsync_tdb *); 336int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *, 337#ifdef __FreeBSD__ 338 struct route *); 339#else 340 struct rtentry *); 341#endif 342int pfsyncioctl(struct ifnet *, u_long, caddr_t); 343void pfsyncstart(struct ifnet *); 344 345struct mbuf *pfsync_if_dequeue(struct ifnet *); 346 347void pfsync_deferred(struct pf_state *, int); 348void pfsync_undefer(struct pfsync_deferral *, int); 349void pfsync_defer_tmo(void *); 350 351void pfsync_request_update(u_int32_t, u_int64_t); 352void pfsync_update_state_req(struct pf_state *); 353 354void pfsync_drop(struct pfsync_softc *); 355void pfsync_sendout(void); 356void pfsync_send_plus(void *, size_t); 357void pfsync_timeout(void *); 358void pfsync_tdb_timeout(void *); 359 360void pfsync_bulk_start(void); 361void pfsync_bulk_status(u_int8_t); 362void pfsync_bulk_update(void *); 363void pfsync_bulk_fail(void *); 364 365#ifdef __FreeBSD__ 366/* XXX: ugly */ 367#define betoh64 (unsigned long long)be64toh 368#define timeout_del callout_stop 369#endif 370 371#define PFSYNC_MAX_BULKTRIES 12 372#ifndef __FreeBSD__ 373int pfsync_sync_ok; 374#endif 375 376#ifdef __FreeBSD__ 
377VNET_DEFINE(struct ifc_simple_data, pfsync_cloner_data); 378VNET_DEFINE(struct if_clone, pfsync_cloner); 379#define V_pfsync_cloner_data VNET(pfsync_cloner_data) 380#define V_pfsync_cloner VNET(pfsync_cloner) 381IFC_SIMPLE_DECLARE(pfsync, 1); 382#else 383struct if_clone pfsync_cloner = 384 IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy); 385#endif 386 387void 388pfsyncattach(int npfsync) 389{ 390 if_clone_attach(&pfsync_cloner); 391} 392int 393#ifdef __FreeBSD__ 394pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param) 395#else 396pfsync_clone_create(struct if_clone *ifc, int unit) 397#endif 398{ 399 struct pfsync_softc *sc; 400 struct ifnet *ifp; 401 int q; 402 403 if (unit != 0) 404 return (EINVAL); 405 406#ifdef __FreeBSD__ 407 sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO); 408 sc->pfsync_sync_ok = 1; 409#else 410 pfsync_sync_ok = 1; 411 sc = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT | M_ZERO); 412#endif 413 414 for (q = 0; q < PFSYNC_S_COUNT; q++) 415 TAILQ_INIT(&sc->sc_qs[q]); 416 417#ifdef __FreeBSD__ 418 sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE, NULL, NULL, NULL, 419 NULL, UMA_ALIGN_PTR, 0); 420#else 421 pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL); 422#endif 423 TAILQ_INIT(&sc->sc_upd_req_list); 424 TAILQ_INIT(&sc->sc_deferrals); 425 sc->sc_deferred = 0; 426 427 TAILQ_INIT(&sc->sc_tdb_q); 428 429 sc->sc_len = PFSYNC_MINPKT; 430 sc->sc_maxupdates = 128; 431 432#ifndef __FreeBSD__ 433 sc->sc_imo.imo_membership = (struct in_multi **)malloc( 434 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS, 435 M_WAITOK | M_ZERO); 436 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS; 437#endif 438 439#ifdef __FreeBSD__ 440 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC); 441 if (ifp == NULL) { 442 uma_zdestroy(sc->sc_pool); 443 free(sc, M_PFSYNC); 444 return (ENOSPC); 445 } 446 if_initname(ifp, ifc->ifc_name, unit); 447#else 448 ifp = &sc->sc_if; 449 snprintf(ifp->if_xname, 
sizeof ifp->if_xname, "pfsync%d", unit); 450#endif 451 ifp->if_softc = sc; 452 ifp->if_ioctl = pfsyncioctl; 453 ifp->if_output = pfsyncoutput; 454 ifp->if_start = pfsyncstart; 455 ifp->if_type = IFT_PFSYNC; 456 ifp->if_snd.ifq_maxlen = ifqmaxlen; 457 ifp->if_hdrlen = sizeof(struct pfsync_header); 458 ifp->if_mtu = ETHERMTU; 459#ifdef __FreeBSD__ 460 callout_init(&sc->sc_tmo, CALLOUT_MPSAFE); 461 callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0); 462 callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE); 463#else 464 timeout_set(&sc->sc_tmo, pfsync_timeout, sc); 465 timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc); 466 timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc); 467#endif 468 469 if_attach(ifp); 470#ifndef __FreeBSD__ 471 if_alloc_sadl(ifp); 472 473#if NCARP > 0 474 if_addgroup(ifp, "carp"); 475#endif 476#endif 477 478#if NBPFILTER > 0 479#ifdef __FreeBSD__ 480 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 481#else 482 bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN); 483#endif 484#endif 485 486#ifdef __FreeBSD__ 487 V_pfsyncif = sc; 488#else 489 pfsyncif = sc; 490#endif 491 492 return (0); 493} 494 495#ifdef __FreeBSD__ 496void 497#else 498int 499#endif 500pfsync_clone_destroy(struct ifnet *ifp) 501{ 502 struct pfsync_softc *sc = ifp->if_softc; 503 504#ifdef __FreeBSD__ 505 PF_LOCK(); 506#endif 507 timeout_del(&sc->sc_bulkfail_tmo); 508 timeout_del(&sc->sc_bulk_tmo); 509 timeout_del(&sc->sc_tmo); 510#ifdef __FreeBSD__ 511 PF_UNLOCK(); 512 if (!sc->pfsync_sync_ok && carp_demote_adj_p) 513 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy"); 514#else 515#if NCARP > 0 516 if (!pfsync_sync_ok) 517 carp_group_demote_adj(&sc->sc_if, -1); 518#endif 519#endif 520#if NBPFILTER > 0 521 bpfdetach(ifp); 522#endif 523 if_detach(ifp); 524 525 pfsync_drop(sc); 526 527 while (sc->sc_deferred > 0) 528 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0); 529 530#ifdef __FreeBSD__ 531 UMA_DESTROY(sc->sc_pool); 532#else 533 pool_destroy(&sc->sc_pool); 
534#endif 535#ifdef __FreeBSD__ 536 if_free(ifp); 537 if (sc->sc_imo.imo_membership) 538 pfsync_multicast_cleanup(sc); 539 free(sc, M_PFSYNC); 540#else 541 free(sc->sc_imo.imo_membership, M_IPMOPTS); 542 free(sc, M_DEVBUF); 543#endif 544 545#ifdef __FreeBSD__ 546 V_pfsyncif = NULL; 547#else 548 pfsyncif = NULL; 549#endif 550 551#ifndef __FreeBSD__ 552 return (0); 553#endif 554} 555 556struct mbuf * 557pfsync_if_dequeue(struct ifnet *ifp) 558{ 559 struct mbuf *m; 560#ifndef __FreeBSD__ 561 int s; 562#endif 563 564#ifdef __FreeBSD__ 565 IF_LOCK(&ifp->if_snd); 566 _IF_DROP(&ifp->if_snd); 567 _IF_DEQUEUE(&ifp->if_snd, m); 568 IF_UNLOCK(&ifp->if_snd); 569#else 570 s = splnet(); 571 IF_DEQUEUE(&ifp->if_snd, m); 572 splx(s); 573#endif 574 575 return (m); 576} 577 578/* 579 * Start output on the pfsync interface. 580 */ 581void 582pfsyncstart(struct ifnet *ifp) 583{ 584 struct mbuf *m; 585 586 while ((m = pfsync_if_dequeue(ifp)) != NULL) { 587#ifndef __FreeBSD__ 588 IF_DROP(&ifp->if_snd); 589#endif 590 m_freem(m); 591 } 592} 593 594int 595pfsync_alloc_scrub_memory(struct pfsync_state_peer *s, 596 struct pf_state_peer *d) 597{ 598 if (s->scrub.scrub_flag && d->scrub == NULL) { 599#ifdef __FreeBSD__ 600 d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO); 601#else 602 d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO); 603#endif 604 if (d->scrub == NULL) 605 return (ENOMEM); 606 } 607 608 return (0); 609} 610 611#ifndef __FreeBSD__ 612void 613pfsync_state_export(struct pfsync_state *sp, struct pf_state *st) 614{ 615 bzero(sp, sizeof(struct pfsync_state)); 616 617 /* copy from state key */ 618 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0]; 619 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1]; 620 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0]; 621 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1]; 622 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0]; 623 sp->key[PF_SK_STACK].addr[1] = 
st->key[PF_SK_STACK]->addr[1]; 624 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0]; 625 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1]; 626 sp->proto = st->key[PF_SK_WIRE]->proto; 627 sp->af = st->key[PF_SK_WIRE]->af; 628 629 /* copy from state */ 630 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname)); 631 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr)); 632 sp->creation = htonl(time_uptime - st->creation); 633 sp->expire = pf_state_expires(st); 634 if (sp->expire <= time_second) 635 sp->expire = htonl(0); 636 else 637 sp->expire = htonl(sp->expire - time_second); 638 639 sp->direction = st->direction; 640 sp->log = st->log; 641 sp->timeout = st->timeout; 642 sp->state_flags = st->state_flags; 643 if (st->src_node) 644 sp->sync_flags |= PFSYNC_FLAG_SRCNODE; 645 if (st->nat_src_node) 646 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; 647 648 bcopy(&st->id, &sp->id, sizeof(sp->id)); 649 sp->creatorid = st->creatorid; 650 pf_state_peer_hton(&st->src, &sp->src); 651 pf_state_peer_hton(&st->dst, &sp->dst); 652 653 if (st->rule.ptr == NULL) 654 sp->rule = htonl(-1); 655 else 656 sp->rule = htonl(st->rule.ptr->nr); 657 if (st->anchor.ptr == NULL) 658 sp->anchor = htonl(-1); 659 else 660 sp->anchor = htonl(st->anchor.ptr->nr); 661 if (st->nat_rule.ptr == NULL) 662 sp->nat_rule = htonl(-1); 663 else 664 sp->nat_rule = htonl(st->nat_rule.ptr->nr); 665 666 pf_state_counter_hton(st->packets[0], sp->packets[0]); 667 pf_state_counter_hton(st->packets[1], sp->packets[1]); 668 pf_state_counter_hton(st->bytes[0], sp->bytes[0]); 669 pf_state_counter_hton(st->bytes[1], sp->bytes[1]); 670 671} 672#endif 673 674int 675pfsync_state_import(struct pfsync_state *sp, u_int8_t flags) 676{ 677 struct pf_state *st = NULL; 678 struct pf_state_key *skw = NULL, *sks = NULL; 679 struct pf_rule *r = NULL; 680 struct pfi_kif *kif; 681 int pool_flags; 682 int error; 683 684#ifdef __FreeBSD__ 685 PF_LOCK_ASSERT(); 686 687 if (sp->creatorid == 0 && 
V_pf_status.debug >= PF_DEBUG_MISC) { 688#else 689 if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) { 690#endif 691 printf("pfsync_state_import: invalid creator id:" 692 " %08x\n", ntohl(sp->creatorid)); 693 return (EINVAL); 694 } 695 696 if ((kif = pfi_kif_get(sp->ifname)) == NULL) { 697#ifdef __FreeBSD__ 698 if (V_pf_status.debug >= PF_DEBUG_MISC) 699#else 700 if (pf_status.debug >= PF_DEBUG_MISC) 701#endif 702 printf("pfsync_state_import: " 703 "unknown interface: %s\n", sp->ifname); 704 if (flags & PFSYNC_SI_IOCTL) 705 return (EINVAL); 706 return (0); /* skip this state */ 707 } 708 709 /* 710 * If the ruleset checksums match or the state is coming from the ioctl, 711 * it's safe to associate the state with the rule of that number. 712 */ 713 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) && 714 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) < 715 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount) 716 r = pf_main_ruleset.rules[ 717 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)]; 718 else 719#ifdef __FreeBSD__ 720 r = &V_pf_default_rule; 721#else 722 r = &pf_default_rule; 723#endif 724 725 if ((r->max_states && r->states_cur >= r->max_states)) 726 goto cleanup; 727 728#ifdef __FreeBSD__ 729 if (flags & PFSYNC_SI_IOCTL) 730 pool_flags = PR_WAITOK | PR_ZERO; 731 else 732 pool_flags = PR_NOWAIT | PR_ZERO; 733 734 if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL) 735 goto cleanup; 736#else 737 if (flags & PFSYNC_SI_IOCTL) 738 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO; 739 else 740 pool_flags = PR_LIMITFAIL | PR_ZERO; 741 742 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL) 743 goto cleanup; 744#endif 745 746 if ((skw = pf_alloc_state_key(pool_flags)) == NULL) 747 goto cleanup; 748 749 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0], 750 &sp->key[PF_SK_STACK].addr[0], sp->af) || 751 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1], 752 &sp->key[PF_SK_STACK].addr[1], sp->af) || 753 sp->key[PF_SK_WIRE].port[0] != 
sp->key[PF_SK_STACK].port[0] || 754 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) { 755 if ((sks = pf_alloc_state_key(pool_flags)) == NULL) 756 goto cleanup; 757 } else 758 sks = skw; 759 760 /* allocate memory for scrub info */ 761 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) || 762 pfsync_alloc_scrub_memory(&sp->dst, &st->dst)) 763 goto cleanup; 764 765 /* copy to state key(s) */ 766 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0]; 767 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1]; 768 skw->port[0] = sp->key[PF_SK_WIRE].port[0]; 769 skw->port[1] = sp->key[PF_SK_WIRE].port[1]; 770 skw->proto = sp->proto; 771 skw->af = sp->af; 772 if (sks != skw) { 773 sks->addr[0] = sp->key[PF_SK_STACK].addr[0]; 774 sks->addr[1] = sp->key[PF_SK_STACK].addr[1]; 775 sks->port[0] = sp->key[PF_SK_STACK].port[0]; 776 sks->port[1] = sp->key[PF_SK_STACK].port[1]; 777 sks->proto = sp->proto; 778 sks->af = sp->af; 779 } 780 781 /* copy to state */ 782 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr)); 783 st->creation = time_uptime - ntohl(sp->creation); 784 st->expire = time_second; 785 if (sp->expire) { 786 /* XXX No adaptive scaling. 
*/ 787 st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire); 788 } 789 790 st->expire = ntohl(sp->expire) + time_second; 791 st->direction = sp->direction; 792 st->log = sp->log; 793 st->timeout = sp->timeout; 794 st->state_flags = sp->state_flags; 795 796 bcopy(sp->id, &st->id, sizeof(st->id)); 797 st->creatorid = sp->creatorid; 798 pf_state_peer_ntoh(&sp->src, &st->src); 799 pf_state_peer_ntoh(&sp->dst, &st->dst); 800 801 st->rule.ptr = r; 802 st->nat_rule.ptr = NULL; 803 st->anchor.ptr = NULL; 804 st->rt_kif = NULL; 805 806 st->pfsync_time = time_uptime; 807 st->sync_state = PFSYNC_S_NONE; 808 809 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */ 810 r->states_cur++; 811 r->states_tot++; 812 813 if (!ISSET(flags, PFSYNC_SI_IOCTL)) 814 SET(st->state_flags, PFSTATE_NOSYNC); 815 816 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) { 817 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */ 818 r->states_cur--; 819 goto cleanup_state; 820 } 821 822 if (!ISSET(flags, PFSYNC_SI_IOCTL)) { 823 CLR(st->state_flags, PFSTATE_NOSYNC); 824 if (ISSET(st->state_flags, PFSTATE_ACK)) { 825 pfsync_q_ins(st, PFSYNC_S_IACK); 826#ifdef __FreeBSD__ 827 pfsync_sendout(); 828#else 829 schednetisr(NETISR_PFSYNC); 830#endif 831 } 832 } 833 CLR(st->state_flags, PFSTATE_ACK); 834 835 return (0); 836 837cleanup: 838 error = ENOMEM; 839 if (skw == sks) 840 sks = NULL; 841#ifdef __FreeBSD__ 842 if (skw != NULL) 843 pool_put(&V_pf_state_key_pl, skw); 844 if (sks != NULL) 845 pool_put(&V_pf_state_key_pl, sks); 846#else 847 if (skw != NULL) 848 pool_put(&pf_state_key_pl, skw); 849 if (sks != NULL) 850 pool_put(&pf_state_key_pl, sks); 851#endif 852 853cleanup_state: /* pf_state_insert frees the state keys */ 854 if (st) { 855#ifdef __FreeBSD__ 856 if (st->dst.scrub) 857 pool_put(&V_pf_state_scrub_pl, st->dst.scrub); 858 if (st->src.scrub) 859 pool_put(&V_pf_state_scrub_pl, st->src.scrub); 860 pool_put(&V_pf_state_pl, st); 861#else 862 if (st->dst.scrub) 863 
pool_put(&pf_state_scrub_pl, st->dst.scrub); 864 if (st->src.scrub) 865 pool_put(&pf_state_scrub_pl, st->src.scrub); 866 pool_put(&pf_state_pl, st); 867#endif 868 } 869 return (error); 870} 871 872void 873#ifdef __FreeBSD__ 874pfsync_input(struct mbuf *m, __unused int off) 875#else 876pfsync_input(struct mbuf *m, ...) 877#endif 878{ 879#ifdef __FreeBSD__ 880 struct pfsync_softc *sc = V_pfsyncif; 881#else 882 struct pfsync_softc *sc = pfsyncif; 883#endif 884 struct pfsync_pkt pkt; 885 struct ip *ip = mtod(m, struct ip *); 886 struct pfsync_header *ph; 887 struct pfsync_subheader subh; 888 889 int offset; 890 int rv; 891 892 V_pfsyncstats.pfsyncs_ipackets++; 893 894 /* verify that we have a sync interface configured */ 895#ifdef __FreeBSD__ 896 if (!sc || !sc->sc_sync_if || !V_pf_status.running) 897#else 898 if (!sc || !sc->sc_sync_if || !pf_status.running) 899#endif 900 goto done; 901 902 /* verify that the packet came in on the right interface */ 903 if (sc->sc_sync_if != m->m_pkthdr.rcvif) { 904 V_pfsyncstats.pfsyncs_badif++; 905 goto done; 906 } 907 908#ifdef __FreeBSD__ 909 sc->sc_ifp->if_ipackets++; 910 sc->sc_ifp->if_ibytes += m->m_pkthdr.len; 911#else 912 sc->sc_if.if_ipackets++; 913 sc->sc_if.if_ibytes += m->m_pkthdr.len; 914#endif 915 /* verify that the IP TTL is 255. 
*/ 916 if (ip->ip_ttl != PFSYNC_DFLTTL) { 917 V_pfsyncstats.pfsyncs_badttl++; 918 goto done; 919 } 920 921 offset = ip->ip_hl << 2; 922 if (m->m_pkthdr.len < offset + sizeof(*ph)) { 923 V_pfsyncstats.pfsyncs_hdrops++; 924 goto done; 925 } 926 927 if (offset + sizeof(*ph) > m->m_len) { 928 if (m_pullup(m, offset + sizeof(*ph)) == NULL) { 929 V_pfsyncstats.pfsyncs_hdrops++; 930 return; 931 } 932 ip = mtod(m, struct ip *); 933 } 934 ph = (struct pfsync_header *)((char *)ip + offset); 935 936 /* verify the version */ 937 if (ph->version != PFSYNC_VERSION) { 938 V_pfsyncstats.pfsyncs_badver++; 939 goto done; 940 } 941 942#if 0 943 if (pfsync_input_hmac(m, offset) != 0) { 944 /* XXX stats */ 945 goto done; 946 } 947#endif 948 949 /* Cheaper to grab this now than having to mess with mbufs later */ 950 pkt.ip = ip; 951 pkt.src = ip->ip_src; 952 pkt.flags = 0; 953 954#ifdef __FreeBSD__ 955 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 956#else 957 if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH)) 958#endif 959 pkt.flags |= PFSYNC_SI_CKSUM; 960 961 offset += sizeof(*ph); 962 for (;;) { 963 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh); 964 offset += sizeof(subh); 965 966 if (subh.action >= PFSYNC_ACT_MAX) { 967 V_pfsyncstats.pfsyncs_badact++; 968 goto done; 969 } 970 971 rv = (*pfsync_acts[subh.action])(&pkt, m, offset, 972 ntohs(subh.count)); 973 if (rv == -1) 974 return; 975 976 offset += rv; 977 } 978 979done: 980 m_freem(m); 981} 982 983int 984pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count) 985{ 986 struct pfsync_clr *clr; 987 struct mbuf *mp; 988 int len = sizeof(*clr) * count; 989 int i, offp; 990 991 struct pf_state *st, *nexts; 992 struct pf_state_key *sk, *nextsk; 993 struct pf_state_item *si; 994 u_int32_t creatorid; 995 int s; 996 997 mp = m_pulldown(m, offset, len, &offp); 998 if (mp == NULL) { 999 V_pfsyncstats.pfsyncs_badlen++; 1000 return (-1); 1001 } 1002 clr = (struct pfsync_clr 
*)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		/*
		 * An empty ifname clears every state from this creator;
		 * otherwise the clear is honoured only when the named
		 * interface is known to pf (see the XXX below).
		 */
		if (clr[i].ifname[0] == '\0') {
#ifdef __FreeBSD__
			for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
#else
			for (st = RB_MIN(pf_state_tree_id, &tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
#endif
				if (st->creatorid == creatorid) {
					/*
					 * NOSYNC keeps the unlink from being
					 * echoed back to the peer that asked
					 * for the clear.
					 */
					SET(st->state_flags, PFSTATE_NOSYNC);
					pf_unlink_state(st);
				}
			}
		} else {
			if (pfi_kif_get(clr[i].ifname) == NULL)
				continue;

			/* XXX correct? */
#ifdef __FreeBSD__
			for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
#else
			for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
#endif
			    sk; sk = nextsk) {
				nextsk = RB_NEXT(pf_state_tree,
#ifdef __FreeBSD__
				    &V_pf_statetbl, sk);
#else
				    &pf_statetbl, sk);
#endif
				TAILQ_FOREACH(si, &sk->states, entry) {
					if (si->s->creatorid == creatorid) {
						SET(si->s->state_flags,
						    PFSTATE_NOSYNC);
						pf_unlink_state(si->s);
					}
				}
			}
		}
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * PFSYNC_ACT_INS: import a batch of complete states from the peer.
 * Each entry is sanity-checked before pfsync_state_import(); an ENOMEM
 * import aborts the rest of the batch, but the message length is still
 * consumed so following actions in the packet can be processed.
 */
int
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;
	int i, offp;

	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC5_ACT_INS: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
			/* drop out, but process the rest of the actions */
			break;
		}
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * PFSYNC_ACT_INS_ACK: the peer acknowledges states we inserted.  A
 * packet held back on such a state (PFSTATE_ACK, see pfsync_defer())
 * may now be released.
 */
int
pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_state_cmp id_key;
	struct pf_state *st;

	struct mbuf *mp;
	int len = count * sizeof(*ia);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ia->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL)
			continue;

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 0);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
	/*
	 * XXX this is not yet implemented, but we know the size of the
	 * message so we can skip it.
	 */

	return (count * sizeof(struct pfsync_ins_ack));
}

/*
 * Compare a peer's TCP state update against our local copy.  Returns 0
 * when the update may be applied, or a nonzero "sfail" reason code when
 * it is stale.  Callers treat codes below 7 as "ignore entirely"; code 7
 * means the src half was imported before failing (see the printf in
 * pfsync_in_upd()).
 */
int
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
    struct pfsync_state_peer *dst)
{
	int sfail = 0;

	/*
	 * The state should never go backwards except
	 * for syn-proxy states.  Neither should the
	 * sequence window slide backwards.
	 */
	if (st->src.state > src->state &&
	    (st->src.state < PF_TCPS_PROXY_SRC ||
	    src->state >= PF_TCPS_PROXY_SRC))
		sfail = 1;
	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
		sfail = 3;
	else if (st->dst.state > dst->state) {
		/* There might still be useful
		 * information about the src state here,
		 * so import that part of the update,
		 * then "fail" so we send the updated
		 * state back to the peer who is missing
		 * our what we know. */
		pf_state_peer_ntoh(src, &st->src);
		/* XXX do anything with timeouts? */
		sfail = 7;
	} else if (st->dst.state >= TCPS_SYN_SENT &&
	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
		sfail = 4;

	return (sfail);
}

/*
 * PFSYNC_ACT_UPD: full state updates.  Unknown states are imported;
 * known states are advanced unless the update is stale, in which case
 * our own (newer) version is queued and pushed back to the peer.
 */
int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int sfail;

	struct mbuf *mp;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		/* A queued insert-deferral is obsoleted by this update. */
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go
			 * forwards
			 */
			if (st->src.state > sp->src.state)
				sfail = 5;
			else if (st->dst.state > sp->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: %s stale update (%d)"
				    " id: %016llx creatorid: %08x\n",
				    (sfail < 7 ? "ignoring" : "partial"),
				    sfail, betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			/* Push our fresher copy back to the peer. */
			pfsync_update_state(st);
#ifdef __FreeBSD__
			pfsync_sendout();
#else
			schednetisr(NETISR_PFSYNC);
#endif
			continue;
		}
		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
		pf_state_peer_ntoh(&sp->src, &st->src);
		pf_state_peer_ntoh(&sp->dst, &st->dst);
		/* expire is transmitted as remaining seconds. */
		st->expire = ntohl(sp->expire) + time_second;
		st->timeout = sp->timeout;
		st->pfsync_time = time_uptime;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * PFSYNC_ACT_UPD_C: compressed state updates (id/creatorid plus peer
 * state only).  A state we do not hold is requested from the peer
 * rather than imported.
 */
int
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_c *ua, *up;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pf_state *st;

	int len = count * sizeof(*up);
	int sfail;

	struct mbuf *mp;
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = up->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			pfsync_request_update(id_key.creatorid, id_key.id);
			continue;
		}

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go forwards
			 */
			if (st->src.state > up->src.state)
				sfail = 5;
			else if (st->dst.state > up->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: ignoring stale update "
				    "(%d) id: %016llx "
				    "creatorid: %08x\n", sfail,
				    betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			pfsync_update_state(st);
#ifdef __FreeBSD__
			pfsync_sendout();
#else
			schednetisr(NETISR_PFSYNC);
#endif
			continue;
		}
		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
		pf_state_peer_ntoh(&up->src, &st->src);
		pf_state_peer_ntoh(&up->dst, &st->dst);
		st->expire = ntohl(up->expire) + time_second;
		st->timeout = up->timeout;
		st->pfsync_time = time_uptime;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * PFSYNC_ACT_UPD_REQ: the peer asks us to (re)send specific states.
 * The wildcard request (id == 0 && creatorid == 0) starts a full bulk
 * update instead.
 */
int
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_req *ur, *ura;
	struct mbuf *mp;
	int len = count * sizeof(*ur);
	int i, offp;

	struct pf_state_cmp id_key;
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		ur = &ura[i];

		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ur->creatorid;

		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			/* Never re-advertise states flagged no-sync. */
			if (ISSET(st->state_flags, PFSTATE_NOSYNC))
				continue;

			pfsync_update_state_req(st);
		}
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif

	return (len);
}

/*
 * PFSYNC_ACT_DEL: full state deletions.  Each named state is unlinked;
 * PFSTATE_NOSYNC keeps the local removal from being echoed back to the
 * peer that announced it.
 */
int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * PFSYNC_ACT_DEL_C: compressed deletions (id/creatorid only); same
 * semantics as pfsync_in_del().
 */
int
pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_del_c *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_del_c *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}

/*
 * PFSYNC_ACT_BUS: bulk update status from the peer.  A Start arms the
 * bulk-failure timeout, scaled by how many states fit into one packet;
 * a valid End clears it and lifts our CARP demotion.
 */
int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)
		return (len);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)),
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)));
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		/* The End must carry a timestamp no older than our request. */
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			timeout_del(&sc->sc_bulkfail_tmo);
#ifdef __FreeBSD__
			if (!sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->pfsync_sync_ok = 1;
#else
#if NCARP > 0
			if (!pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, -1);
#endif
			pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}

	return (len);
}

/*
 * PFSYNC_ACT_TDB: IPsec tdb replay-counter updates.  Compiled out
 * unless IPSEC is defined; the message length is consumed either way.
 */
int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
#endif

	return (len);
}

#if defined(IPSEC)
/* Update an in-kernel tdb. Silently fail if no tdb is found.
 */
void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;
	int s;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	s = spltdb();
	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			splx(s);
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	splx(s);
	return;

bad:
#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
#endif


/*
 * PFSYNC_ACT_EOF: must be the last subheader in the packet; anything
 * other than that position counts as a bad action.  Consumes the mbuf.
 */
int
pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	/* check if we are at the right place in the packet */
	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
		V_pfsyncstats.pfsyncs_badact++;

	/* we're done. free and let the caller return */
	m_freem(m);
	return (-1);
}

/* Handler for unknown actions: count it and drop the whole packet. */
int
pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	V_pfsyncstats.pfsyncs_badact++;

	m_freem(m);
	return (-1);
}

/*
 * if_output for the pfsync pseudo-interface: nothing may be sent
 * through it directly, so any packet handed to us is dropped.
 */
int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
#ifdef __FreeBSD__
    struct route *rt)
#else
    struct rtentry *rt)
#endif
{
	m_freem(m);
	return (0);
}

/*
 * ioctl handler for the pfsync interface: interface flags and MTU,
 * plus SIOCGETPFSYNC/SIOCSETPFSYNC which read and configure the sync
 * interface, peer address and maxupdates.
 */
/* ARGSUSED */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#ifndef __FreeBSD__
	struct proc *p = curproc;
#endif
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	struct ip *ip;
	int s, error;

	switch (cmd) {
#if 0
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
#endif
	case SIOCSIFFLAGS:
#ifdef __FreeBSD__
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
		else
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#else
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
#endif
		break;
	case SIOCSIFMTU:
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
			return (EINVAL);
		if (ifr->ifr_mtu < ifp->if_mtu) {
			/* Shrinking: flush the pending packet first. */
			s = splnet();
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			pfsync_sendout();
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			splx(s);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates =
		    sc->sc_maxupdates;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
#ifdef __FreeBSD__
		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
#else
		if ((error = suser(p, p->p_acflag)) != 0)
#endif
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		/* An unset peer defaults to the pfsync multicast group. */
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
#ifdef __FreeBSD__
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
#endif
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
#ifdef __FreeBSD__
		{
			PF_UNLOCK();
#endif
			return (EINVAL);
#ifdef __FreeBSD__
		}
#endif
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		/* An empty syncdev detaches the sync interface. */
		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
			PF_UNLOCK();
			if (imo->imo_membership)
				pfsync_multicast_cleanup(sc);
#else
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
#endif
			break;
		}

#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		s = splnet();
		/*
		 * Flush any pending packet if the new sync interface
		 * could not carry it.
		 */
#ifdef __FreeBSD__
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
#else
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
#endif
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout();
		sc->sc_sync_if = sifp;

#ifdef __FreeBSD__
		if (imo->imo_membership) {
			PF_UNLOCK();
			pfsync_multicast_cleanup(sc);
			PF_LOCK();
		}
#else
		if (imo->imo_num_memberships > 0) {
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
			imo->imo_multicast_ifp = NULL;
		}
#endif

		/*
		 * With the default (multicast) peer, join the pfsync
		 * group on the new sync interface.
		 */
#ifdef __FreeBSD__
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
			PF_UNLOCK();
			error = pfsync_multicast_setup(sc);
			if (error)
				return (error);
			PF_LOCK();
		}
#else
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
			struct in_addr addr;

			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
				splx(s);
				return (EADDRNOTAVAIL);
			}

			addr.s_addr = INADDR_PFSYNC_GROUP;

			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;
				splx(s);
				return (ENOBUFS);
			}
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}
#endif /* !__FreeBSD__ */

		/* Prebuild the IP header used for every outgoing packet. */
		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
#ifdef __FreeBSD__
		ip->ip_off = IP_DF;
#else
		ip->ip_off = htons(IP_DF);
#endif
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
#ifdef __FreeBSD__
			if (sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(V_pfsync_carp_adj,
				    "pfsync bulk start");
			sc->pfsync_sync_ok = 0;
#else
#if NCARP > 0
			if (pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, 1);
#endif
			pfsync_sync_ok = 0;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: requesting bulk update\n");
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulk_fail, V_pfsyncif);
#else
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
			pfsync_request_update(0, 0);
		}
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		splx(s);

		break;

	default:
		return (ENOTTY);
	}

	return (0);
}

/* Write a full state record into an outgoing packet at offset. */
int
pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);

	pfsync_state_export(sp, st);

	return (sizeof(*sp));
}

/* Write an insert-acknowledgement subentry for the given state. */
int
pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_ins_ack *iack =
	    (struct pfsync_ins_ack *)(m->m_data + offset);

	iack->id = st->id;
	iack->creatorid = st->creatorid;

	return (sizeof(*iack));
}

/*
 * Write a compressed update; expire is transmitted as the number of
 * seconds remaining, clamped at zero.
 */
int
pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);

	up->id = st->id;
	pf_state_peer_hton(&st->src, &up->src);
	pf_state_peer_hton(&st->dst, &up->dst);
	up->creatorid = st->creatorid;

	up->expire = pf_state_expires(st);
	if (up->expire <= time_second)
		up->expire = htonl(0);
	else
		up->expire = htonl(up->expire - time_second);
	up->timeout = st->timeout;

	bzero(up->_pad,
	    sizeof(up->_pad)); /* XXX */

	return (sizeof(*up));
}

/* Write a compressed delete and flag the state no-sync. */
int
pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
{
	struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);

	dp->id = st->id;
	dp->creatorid = st->creatorid;

	SET(st->state_flags, PFSTATE_NOSYNC);

	return (sizeof(*dp));
}

/*
 * Throw away everything queued for transmission (state queues, update
 * requests, the "plus" region) and reset sc_len to an empty packet.
 */
void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif
			st->sync_state = PFSYNC_S_NONE;
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);
	}

	sc->sc_plus = NULL;

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);

		TAILQ_INIT(&sc->sc_tdb_q);
	}
#endif

	sc->sc_len = PFSYNC_MINPKT;
}

/*
 * Build one pfsync packet from everything currently queued: the state
 * queues, pending update requests, the optional "plus" region, and a
 * trailing EOF subheader; then hand it to the sync interface (and to
 * bpf listeners).  Resets sc_len for the next packet.
 */
void
pfsync_sendout(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
#if NBPFILTER > 0
#ifdef __FreeBSD__
	struct ifnet *ifp = sc->sc_ifp;
#else
	struct ifnet *ifp = &sc->sc_if;
#endif
#endif
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
#ifdef __FreeBSD__
	size_t pktlen;
	int dummy_error;
#endif
	int offset;
	int q, count = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_NET);
#endif

	/* Nothing queued beyond the bare headers: nothing to send. */
	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
		return;

#if NBPFILTER > 0
	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
#else
	if (sc->sc_sync_if == NULL) {
#endif
		pfsync_drop(sc);
		return;
	}

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
#ifdef __FreeBSD__
		sc->sc_ifp->if_oerrors++;
#else
		sc->sc_if.if_oerrors++;
#endif
		V_pfsyncstats.pfsyncs_onomem++;
		pfsync_drop(sc);
		return;
	}

#ifdef __FreeBSD__
	pktlen = max_linkhdr + sc->sc_len;
	if (pktlen > MHLEN) {
		/* Find the right pool to allocate from. */
		/* XXX: This is ugly. */
		m_cljget(m, M_DONTWAIT, pktlen <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
#else
	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
#endif
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
#ifdef __FreeBSD__
			sc->sc_ifp->if_oerrors++;
#else
			sc->sc_if.if_oerrors++;
#endif
			V_pfsyncstats.pfsyncs_onomem++;
			pfsync_drop(sc);
			return;
		}
	}
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

#ifdef __FreeBSD__
	ip->ip_len = m->m_pkthdr.len;
#else
	ip->ip_len = htons(m->m_pkthdr.len);
#endif
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
#ifdef __FreeBSD__
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#else
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#endif

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif

			/* Per-queue writer fills in the entry. */
			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		/* Subheader is written after the count is known. */
		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			pool_put(&sc->sc_pool, ur);

			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

		sc->sc_plus = NULL;
	}

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);

			count++;
		}
		TAILQ_INIT(&sc->sc_tdb_q);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);
	}
#endif

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);

	/* XXX write checksum in EOF here */

	/* we're done, let's put it on the wire */
#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/* Show bpf the pfsync payload without the IP header. */
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
#ifdef __FreeBSD__
		BPF_MTAP(ifp, m);
#else
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}

	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}
#endif

#ifdef __FreeBSD__
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	/* Actual ip_output() happens from the software interrupt. */
	IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
	swi_sched(V_pfsync_swi_cookie, 0);
#else
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;

	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
	else
		pfsyncstats.pfsyncs_oerrors++;

	/* start again */
	sc->sc_len = PFSYNC_MINPKT;
#endif
}

/*
 * Queue a newly created local state for an insert message; states from
 * no-sync rules or pfsync's own traffic are flagged and never synced.
 */
void
pfsync_insert_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
		return;
	}

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
		return;

#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif
#endif

	/* First entry into an empty packet arms the send timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	pfsync_q_ins(st, PFSYNC_S_INS);

	/* A deferred packet is waiting on this insert: send now. */
	if (ISSET(st->state_flags, PFSTATE_ACK))
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	else
		st->sync_updates = 0;
}

/* Deferral timeout, in ticks (passed to callout_reset()/timeout_add()). */
int defer = 10;

/*
 * Hold back the packet that created this state until the peer has
 * acknowledged the insert or the deferral times out.  At most 128
 * deferrals are kept; the oldest is released to make room.  Returns 1
 * when the mbuf was taken over, 0 when the caller keeps it.
 */
int
pfsync_defer(struct pf_state *st, struct mbuf *m)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = pool_get(&sc->sc_pool, M_NOWAIT);
	if (pd == NULL)
		return (0);
	sc->sc_deferred++;

	/* Mark so the packet is not filtered again when re-sent. */
#ifdef __FreeBSD__
	m->m_flags |= M_SKIP_FIREWALL;
#else
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
#endif
	SET(st->state_flags, PFSTATE_ACK);

	pd->pd_st = st;
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
#ifdef __FreeBSD__
	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
	    pd);
#else
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);
#endif

	return (1);
}

/*
 * Release a deferral: drop the held packet, or send it out via
 * ip_output().  The PF lock is dropped around ip_output() to keep the
 * lock order with the IP stack.
 */
void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;

	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo); /* bah */
	if (drop)
		m_freem(pd->pd_m);
	else {
		s = splnet();
#ifdef __FreeBSD__
		/* XXX: use pf_defered?! */
		PF_UNLOCK();
#endif
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	pool_put(&sc->sc_pool, pd);
}

/* Callout handler: release a deferral whose timeout expired. */
void
pfsync_defer_tmo(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_deferral *pd = arg;
#endif
	int s;

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
	PF_LOCK();
#endif
	pfsync_undefer(arg, 0);
#ifdef __FreeBSD__
	PF_UNLOCK();
	CURVNET_RESTORE();
#endif
	splx(s);
}

/*
 * Find and release the deferral attached to a state.  Callers only
 * invoke this when PFSTATE_ACK is set, so a miss is a bug: panic.
 */
void
pfsync_deferred(struct pf_state *st, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
		if (pd->pd_st == st) {
			pfsync_undefer(pd, drop);
			return;
		}
	}

	panic("pfsync_send_deferred: unable to find deferred state");
}

/* Statistic: updates that forced an immediate send. */
u_int pfsync_upds = 0;

/*
 * Note a change on a local state: merge into an already-queued message
 * or queue a compressed update.  A send is forced when a TCP state has
 * reached sc_maxupdates queued changes, or when the state was last
 * synced less than two seconds ago.
 */
void
pfsync_update_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int sync = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = 1;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		break;

	default:
		panic("pfsync_update_state: unexpected sync state %d",
		    st->sync_state);
	}

	if (sync || (time_uptime - st->pfsync_time) < 2) {
		pfsync_upds++;
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	}
}

/*
 * Queue an explicit update request for a state we saw a compressed
 * update for but do not hold (see pfsync_in_upd_c()).
 */
void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);
	int s;

	PF_LOCK_ASSERT();

	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
2620 */ 2621 2622 item = pool_get(&sc->sc_pool, PR_NOWAIT); 2623 if (item == NULL) { 2624 /* XXX stats */ 2625 return; 2626 } 2627 2628 item->ur_msg.id = id; 2629 item->ur_msg.creatorid = creatorid; 2630 2631 if (TAILQ_EMPTY(&sc->sc_upd_req_list)) 2632 nlen += sizeof(struct pfsync_subheader); 2633 2634#ifdef __FreeBSD__ 2635 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) { 2636#else 2637 if (sc->sc_len + nlen > sc->sc_if.if_mtu) { 2638#endif 2639 s = splnet(); 2640 pfsync_sendout(); 2641 splx(s); 2642 2643 nlen = sizeof(struct pfsync_subheader) + 2644 sizeof(struct pfsync_upd_req); 2645 } 2646 2647 TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry); 2648 sc->sc_len += nlen; 2649 2650#ifdef __FreeBSD__ 2651 pfsync_sendout(); 2652#else 2653 schednetisr(NETISR_PFSYNC); 2654#endif 2655} 2656 2657void 2658pfsync_update_state_req(struct pf_state *st) 2659{ 2660#ifdef __FreeBSD__ 2661 struct pfsync_softc *sc = V_pfsyncif; 2662#else 2663 struct pfsync_softc *sc = pfsyncif; 2664#endif 2665 2666 PF_LOCK_ASSERT(); 2667 2668 if (sc == NULL) 2669 panic("pfsync_update_state_req: nonexistant instance"); 2670 2671 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) { 2672 if (st->sync_state != PFSYNC_S_NONE) 2673 pfsync_q_del(st); 2674 return; 2675 } 2676 2677 switch (st->sync_state) { 2678 case PFSYNC_S_UPD_C: 2679 case PFSYNC_S_IACK: 2680 pfsync_q_del(st); 2681 case PFSYNC_S_NONE: 2682 pfsync_q_ins(st, PFSYNC_S_UPD); 2683#ifdef __FreeBSD__ 2684 pfsync_sendout(); 2685#else 2686 schednetisr(NETISR_PFSYNC); 2687#endif 2688 return; 2689 2690 case PFSYNC_S_INS: 2691 case PFSYNC_S_UPD: 2692 case PFSYNC_S_DEL: 2693 /* we're already handling it */ 2694 return; 2695 2696 default: 2697 panic("pfsync_update_state_req: unexpected sync state %d", 2698 st->sync_state); 2699 } 2700} 2701 2702void 2703pfsync_delete_state(struct pf_state *st) 2704{ 2705#ifdef __FreeBSD__ 2706 struct pfsync_softc *sc = V_pfsyncif; 2707#else 2708 struct pfsync_softc *sc = pfsyncif; 2709#endif 2710 2711#ifdef __FreeBSD__ 
2712 PF_LOCK_ASSERT(); 2713#else 2714 splassert(IPL_SOFTNET); 2715#endif 2716 2717 if (sc == NULL) 2718 return; 2719 2720 if (ISSET(st->state_flags, PFSTATE_ACK)) 2721 pfsync_deferred(st, 1); 2722 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) { 2723 if (st->sync_state != PFSYNC_S_NONE) 2724 pfsync_q_del(st); 2725 return; 2726 } 2727 2728 if (sc->sc_len == PFSYNC_MINPKT) 2729#ifdef __FreeBSD__ 2730 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout, 2731 V_pfsyncif); 2732#else 2733 timeout_add_sec(&sc->sc_tmo, 1); 2734#endif 2735 2736 switch (st->sync_state) { 2737 case PFSYNC_S_INS: 2738 /* we never got to tell the world so just forget about it */ 2739 pfsync_q_del(st); 2740 return; 2741 2742 case PFSYNC_S_UPD_C: 2743 case PFSYNC_S_UPD: 2744 case PFSYNC_S_IACK: 2745 pfsync_q_del(st); 2746 /* FALLTHROUGH to putting it on the del list */ 2747 2748 case PFSYNC_S_NONE: 2749 pfsync_q_ins(st, PFSYNC_S_DEL); 2750 return; 2751 2752 default: 2753 panic("pfsync_delete_state: unexpected sync state %d", 2754 st->sync_state); 2755 } 2756} 2757 2758void 2759pfsync_clear_states(u_int32_t creatorid, const char *ifname) 2760{ 2761 struct { 2762 struct pfsync_subheader subh; 2763 struct pfsync_clr clr; 2764 } __packed r; 2765 2766#ifdef __FreeBSD__ 2767 struct pfsync_softc *sc = V_pfsyncif; 2768#else 2769 struct pfsync_softc *sc = pfsyncif; 2770#endif 2771 2772#ifdef __FreeBSD__ 2773 PF_LOCK_ASSERT(); 2774#else 2775 splassert(IPL_SOFTNET); 2776#endif 2777 2778 if (sc == NULL) 2779 return; 2780 2781 bzero(&r, sizeof(r)); 2782 2783 r.subh.action = PFSYNC_ACT_CLR; 2784 r.subh.count = htons(1); 2785 2786 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname)); 2787 r.clr.creatorid = creatorid; 2788 2789 pfsync_send_plus(&r, sizeof(r)); 2790} 2791 2792void 2793pfsync_q_ins(struct pf_state *st, int q) 2794{ 2795#ifdef __FreeBSD__ 2796 struct pfsync_softc *sc = V_pfsyncif; 2797#else 2798 struct pfsync_softc *sc = pfsyncif; 2799#endif 2800 size_t nlen = pfsync_qs[q].len; 2801 int s; 2802 2803 
PF_LOCK_ASSERT(); 2804 2805#ifdef __FreeBSD__ 2806 KASSERT(st->sync_state == PFSYNC_S_NONE, 2807 ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__)); 2808#else 2809 KASSERT(st->sync_state == PFSYNC_S_NONE); 2810#endif 2811 2812#if 1 || defined(PFSYNC_DEBUG) 2813 if (sc->sc_len < PFSYNC_MINPKT) 2814#ifdef __FreeBSD__ 2815 panic("pfsync pkt len is too low %zu", sc->sc_len); 2816#else 2817 panic("pfsync pkt len is too low %d", sc->sc_len); 2818#endif 2819#endif 2820 if (TAILQ_EMPTY(&sc->sc_qs[q])) 2821 nlen += sizeof(struct pfsync_subheader); 2822 2823#ifdef __FreeBSD__ 2824 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) { 2825#else 2826 if (sc->sc_len + nlen > sc->sc_if.if_mtu) { 2827#endif 2828 s = splnet(); 2829 pfsync_sendout(); 2830 splx(s); 2831 2832 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len; 2833 } 2834 2835 sc->sc_len += nlen; 2836 TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list); 2837 st->sync_state = q; 2838} 2839 2840void 2841pfsync_q_del(struct pf_state *st) 2842{ 2843#ifdef __FreeBSD__ 2844 struct pfsync_softc *sc = V_pfsyncif; 2845#else 2846 struct pfsync_softc *sc = pfsyncif; 2847#endif 2848 int q = st->sync_state; 2849 2850#ifdef __FreeBSD__ 2851 KASSERT(st->sync_state != PFSYNC_S_NONE, 2852 ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__)); 2853#else 2854 KASSERT(st->sync_state != PFSYNC_S_NONE); 2855#endif 2856 2857 sc->sc_len -= pfsync_qs[q].len; 2858 TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list); 2859 st->sync_state = PFSYNC_S_NONE; 2860 2861 if (TAILQ_EMPTY(&sc->sc_qs[q])) 2862 sc->sc_len -= sizeof(struct pfsync_subheader); 2863} 2864 2865#ifdef notyet 2866void 2867pfsync_update_tdb(struct tdb *t, int output) 2868{ 2869#ifdef __FreeBSD__ 2870 struct pfsync_softc *sc = V_pfsyncif; 2871#else 2872 struct pfsync_softc *sc = pfsyncif; 2873#endif 2874 size_t nlen = sizeof(struct pfsync_tdb); 2875 int s; 2876 2877 if (sc == NULL) 2878 return; 2879 2880 if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) { 2881 if (TAILQ_EMPTY(&sc->sc_tdb_q)) 2882 
nlen += sizeof(struct pfsync_subheader); 2883 2884 if (sc->sc_len + nlen > sc->sc_if.if_mtu) { 2885 s = splnet(); 2886 PF_LOCK(); 2887 pfsync_sendout(); 2888 PF_UNLOCK(); 2889 splx(s); 2890 2891 nlen = sizeof(struct pfsync_subheader) + 2892 sizeof(struct pfsync_tdb); 2893 } 2894 2895 sc->sc_len += nlen; 2896 TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry); 2897 SET(t->tdb_flags, TDBF_PFSYNC); 2898 t->tdb_updates = 0; 2899 } else { 2900 if (++t->tdb_updates >= sc->sc_maxupdates) 2901 schednetisr(NETISR_PFSYNC); 2902 } 2903 2904 if (output) 2905 SET(t->tdb_flags, TDBF_PFSYNC_RPL); 2906 else 2907 CLR(t->tdb_flags, TDBF_PFSYNC_RPL); 2908} 2909 2910void 2911pfsync_delete_tdb(struct tdb *t) 2912{ 2913#ifdef __FreeBSD__ 2914 struct pfsync_softc *sc = V_pfsyncif; 2915#else 2916 struct pfsync_softc *sc = pfsyncif; 2917#endif 2918 2919 if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC)) 2920 return; 2921 2922 sc->sc_len -= sizeof(struct pfsync_tdb); 2923 TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry); 2924 CLR(t->tdb_flags, TDBF_PFSYNC); 2925 2926 if (TAILQ_EMPTY(&sc->sc_tdb_q)) 2927 sc->sc_len -= sizeof(struct pfsync_subheader); 2928} 2929 2930int 2931pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset) 2932{ 2933 struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset); 2934 2935 bzero(ut, sizeof(*ut)); 2936 ut->spi = t->tdb_spi; 2937 bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst)); 2938 /* 2939 * When a failover happens, the master's rpl is probably above 2940 * what we see here (we may be up to a second late), so 2941 * increase it a bit for outbound tdbs to manage most such 2942 * situations. 2943 * 2944 * For now, just add an offset that is likely to be larger 2945 * than the number of packets we can see in one second. The RFC 2946 * just says the next packet must have a higher seq value. 2947 * 2948 * XXX What is a good algorithm for this? We could use 2949 * a rate-determined increase, but to know it, we would have 2950 * to extend struct tdb. 
2951 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb 2952 * will soon be replaced anyway. For now, just don't handle 2953 * this edge case. 2954 */ 2955#define RPL_INCR 16384 2956 ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ? 2957 RPL_INCR : 0)); 2958 ut->cur_bytes = htobe64(t->tdb_cur_bytes); 2959 ut->sproto = t->tdb_sproto; 2960 2961 return (sizeof(*ut)); 2962} 2963#endif 2964 2965void 2966pfsync_bulk_start(void) 2967{ 2968#ifdef __FreeBSD__ 2969 struct pfsync_softc *sc = V_pfsyncif; 2970#else 2971 struct pfsync_softc *sc = pfsyncif; 2972#endif 2973 2974#ifdef __FreeBSD__ 2975 if (V_pf_status.debug >= PF_DEBUG_MISC) 2976#else 2977 if (pf_status.debug >= PF_DEBUG_MISC) 2978#endif 2979 printf("pfsync: received bulk update request\n"); 2980 2981#ifdef __FreeBSD__ 2982 PF_LOCK_ASSERT(); 2983 if (TAILQ_EMPTY(&V_state_list)) 2984#else 2985 if (TAILQ_EMPTY(&state_list)) 2986#endif 2987 pfsync_bulk_status(PFSYNC_BUS_END); 2988 else { 2989 sc->sc_ureq_received = time_uptime; 2990 if (sc->sc_bulk_next == NULL) 2991#ifdef __FreeBSD__ 2992 sc->sc_bulk_next = TAILQ_FIRST(&V_state_list); 2993#else 2994 sc->sc_bulk_next = TAILQ_FIRST(&state_list); 2995#endif 2996 sc->sc_bulk_last = sc->sc_bulk_next; 2997 2998 pfsync_bulk_status(PFSYNC_BUS_START); 2999 callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc); 3000 } 3001} 3002 3003void 3004pfsync_bulk_update(void *arg) 3005{ 3006 struct pfsync_softc *sc = arg; 3007 struct pf_state *st = sc->sc_bulk_next; 3008 int i = 0; 3009 int s; 3010 3011 PF_LOCK_ASSERT(); 3012 3013 s = splsoftnet(); 3014#ifdef __FreeBSD__ 3015 CURVNET_SET(sc->sc_ifp->if_vnet); 3016#endif 3017 for (;;) { 3018 if (st->sync_state == PFSYNC_S_NONE && 3019 st->timeout < PFTM_MAX && 3020 st->pfsync_time <= sc->sc_ureq_received) { 3021 pfsync_update_state_req(st); 3022 i++; 3023 } 3024 3025 st = TAILQ_NEXT(st, entry_list); 3026 if (st == NULL) 3027#ifdef __FreeBSD__ 3028 st = TAILQ_FIRST(&V_state_list); 3029#else 3030 st = 
TAILQ_FIRST(&state_list); 3031#endif 3032 3033 if (st == sc->sc_bulk_last) { 3034 /* we're done */ 3035 sc->sc_bulk_next = NULL; 3036 sc->sc_bulk_last = NULL; 3037 pfsync_bulk_status(PFSYNC_BUS_END); 3038 break; 3039 } 3040 3041#ifdef __FreeBSD__ 3042 if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) < 3043#else 3044 if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) < 3045#endif 3046 sizeof(struct pfsync_state)) { 3047 /* we've filled a packet */ 3048 sc->sc_bulk_next = st; 3049#ifdef __FreeBSD__ 3050 callout_reset(&sc->sc_bulk_tmo, 1, 3051 pfsync_bulk_update, sc); 3052#else 3053 timeout_add(&sc->sc_bulk_tmo, 1); 3054#endif 3055 break; 3056 } 3057 } 3058 3059#ifdef __FreeBSD__ 3060 CURVNET_RESTORE(); 3061#endif 3062 splx(s); 3063} 3064 3065void 3066pfsync_bulk_status(u_int8_t status) 3067{ 3068 struct { 3069 struct pfsync_subheader subh; 3070 struct pfsync_bus bus; 3071 } __packed r; 3072 3073#ifdef __FreeBSD__ 3074 struct pfsync_softc *sc = V_pfsyncif; 3075#else 3076 struct pfsync_softc *sc = pfsyncif; 3077#endif 3078 3079 PF_LOCK_ASSERT(); 3080 3081 bzero(&r, sizeof(r)); 3082 3083 r.subh.action = PFSYNC_ACT_BUS; 3084 r.subh.count = htons(1); 3085 3086#ifdef __FreeBSD__ 3087 r.bus.creatorid = V_pf_status.hostid; 3088#else 3089 r.bus.creatorid = pf_status.hostid; 3090#endif 3091 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received); 3092 r.bus.status = status; 3093 3094 pfsync_send_plus(&r, sizeof(r)); 3095} 3096 3097void 3098pfsync_bulk_fail(void *arg) 3099{ 3100 struct pfsync_softc *sc = arg; 3101 3102#ifdef __FreeBSD__ 3103 CURVNET_SET(sc->sc_ifp->if_vnet); 3104#endif 3105 3106 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) { 3107 /* Try again */ 3108#ifdef __FreeBSD__ 3109 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, 3110 pfsync_bulk_fail, V_pfsyncif); 3111#else 3112 timeout_add_sec(&sc->sc_bulkfail_tmo, 5); 3113#endif 3114 PF_LOCK(); 3115 pfsync_request_update(0, 0); 3116 PF_UNLOCK(); 3117 } else { 3118 /* Pretend like the transfer was ok */ 3119 sc->sc_ureq_sent 
= 0; 3120 sc->sc_bulk_tries = 0; 3121#ifdef __FreeBSD__ 3122 if (!sc->pfsync_sync_ok && carp_demote_adj_p) 3123 (*carp_demote_adj_p)(-V_pfsync_carp_adj, 3124 "pfsync bulk fail"); 3125 sc->pfsync_sync_ok = 1; 3126#else 3127#if NCARP > 0 3128 if (!pfsync_sync_ok) 3129 carp_group_demote_adj(&sc->sc_if, -1); 3130#endif 3131 pfsync_sync_ok = 1; 3132#endif 3133#ifdef __FreeBSD__ 3134 if (V_pf_status.debug >= PF_DEBUG_MISC) 3135#else 3136 if (pf_status.debug >= PF_DEBUG_MISC) 3137#endif 3138 printf("pfsync: failed to receive bulk update\n"); 3139 } 3140 3141#ifdef __FreeBSD__ 3142 CURVNET_RESTORE(); 3143#endif 3144} 3145 3146void 3147pfsync_send_plus(void *plus, size_t pluslen) 3148{ 3149#ifdef __FreeBSD__ 3150 struct pfsync_softc *sc = V_pfsyncif; 3151#else 3152 struct pfsync_softc *sc = pfsyncif; 3153#endif 3154 int s; 3155 3156 PF_LOCK_ASSERT(); 3157 3158#ifdef __FreeBSD__ 3159 if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) { 3160#else 3161 if (sc->sc_len + pluslen > sc->sc_if.if_mtu) { 3162#endif 3163 s = splnet(); 3164 pfsync_sendout(); 3165 splx(s); 3166 } 3167 3168 sc->sc_plus = plus; 3169 sc->sc_len += (sc->sc_pluslen = pluslen); 3170 3171 s = splnet(); 3172 pfsync_sendout(); 3173 splx(s); 3174} 3175 3176int 3177pfsync_up(void) 3178{ 3179#ifdef __FreeBSD__ 3180 struct pfsync_softc *sc = V_pfsyncif; 3181#else 3182 struct pfsync_softc *sc = pfsyncif; 3183#endif 3184 3185#ifdef __FreeBSD__ 3186 if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING)) 3187#else 3188 if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING)) 3189#endif 3190 return (0); 3191 3192 return (1); 3193} 3194 3195int 3196pfsync_state_in_use(struct pf_state *st) 3197{ 3198#ifdef __FreeBSD__ 3199 struct pfsync_softc *sc = V_pfsyncif; 3200#else 3201 struct pfsync_softc *sc = pfsyncif; 3202#endif 3203 3204 if (sc == NULL) 3205 return (0); 3206 3207 if (st->sync_state != PFSYNC_S_NONE || 3208 st == sc->sc_bulk_next || 3209 st == sc->sc_bulk_last) 3210 return (1); 3211 3212 return (0); 3213} 

/* Event counters: softintr runs and packet-timeout fires. */
u_int pfsync_ints;
u_int pfsync_tmos;

/*
 * One-second packet timeout: flush whatever has accumulated in the
 * build buffer.  NOTE(review): sc is only declared under VIMAGE
 * because CURVNET_SET is its sole consumer; without VIMAGE the macro
 * is a no-op and the variable would be unused.
 */
void
pfsync_timeout(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_softc *sc = arg;
#endif
	int s;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	pfsync_tmos++;

	s = splnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}

/* this is a softnet/netisr handler */
void
#ifdef __FreeBSD__
/*
 * FreeBSD software interrupt: drain the interface send queue that
 * pfsync_sendout() filled and transmit each packet via ip_output().
 */
pfsyncintr(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct mbuf *m, *n;

	CURVNET_SET(sc->sc_ifp->if_vnet);
	pfsync_ints++;

	IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);

	for (; m != NULL; m = n) {

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
		    == 0)
			V_pfsyncstats.pfsyncs_opackets++;
		else
			V_pfsyncstats.pfsyncs_oerrors++;
	}
	CURVNET_RESTORE();
}
#else
/* OpenBSD netisr handler: just flush the pending packet. */
pfsyncintr(void)
{
	int s;

	pfsync_ints++;

	s = splnet();
	pfsync_sendout();
	splx(s);
}
#endif

/*
 * sysctl handler stub.  The PFSYNCCTL_STATS export is disabled
 * (#ifdef notyet); everything currently returns ENOPROTOOPT.
 */
int
pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

#ifdef notyet
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case PFSYNCCTL_STATS:
		if (newp != NULL)
			return (EPERM);
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
	}
#endif
	return (ENOPROTOOPT);
}

#ifdef __FreeBSD__
/*
 * Join the pfsync multicast group (sc_sync_peer) on the configured
 * sync interface and set up the IP multicast options used for output.
 * Returns 0 on success or an errno; on EADDRNOTAVAIL the sync
 * interface pointer is cleared.
 */
static int
pfsync_multicast_setup(struct pfsync_softc *sc)
{
	struct ip_moptions *imo = &sc->sc_imo;
	int error;

	if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
		sc->sc_sync_if = NULL;
		return (EADDRNOTAVAIL);
	}

	imo->imo_membership = (struct in_multi **)malloc(
	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_PFSYNC,
	    M_WAITOK | M_ZERO);
	imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
	imo->imo_multicast_vif = -1;

	if ((error = in_joingroup(sc->sc_sync_if, &sc->sc_sync_peer, NULL,
	    &imo->imo_membership[0])) != 0) {
		free(imo->imo_membership, M_PFSYNC);
		return (error);
	}
	imo->imo_num_memberships++;
	imo->imo_multicast_ifp = sc->sc_sync_if;
	imo->imo_multicast_ttl = PFSYNC_DFLTTL;
	/* Don't loop our own sync traffic back to ourselves. */
	imo->imo_multicast_loop = 0;

	return (0);
}

/*
 * Undo pfsync_multicast_setup(): leave the group and release the
 * membership array.
 */
static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
	struct ip_moptions *imo = &sc->sc_imo;

	in_leavegroup(imo->imo_membership[0], NULL);
	free(imo->imo_membership, M_PFSYNC);
	imo->imo_membership = NULL;
	imo->imo_multicast_ifp = NULL;
}

#ifdef INET
extern struct domain inetdomain;
/* Protocol switch entry hooking pfsync_input into raw IP input. */
static struct protosw in_pfsync_protosw = {
	.pr_type =		SOCK_RAW,
	.pr_domain =		&inetdomain,
	.pr_protocol =		IPPROTO_PFSYNC,
	.pr_flags =		PR_ATOMIC|PR_ADDR,
	.pr_input =		pfsync_input,
	.pr_output =		(pr_output_t *)rip_output,
	.pr_ctloutput =		rip_ctloutput,
	.pr_usrreqs =		&rip_usrreqs
};
#endif

/*
 * Module load: per-vnet, attach the interface cloner and the software
 * interrupt handler; then register the IPPROTO_PFSYNC protocol and
 * install the pf -> pfsync hook pointers.  On failure everything set
 * up so far is torn down again.
 */
static int
pfsync_init()
{
	VNET_ITERATOR_DECL(vnet_iter);
	int error = 0;

	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		V_pfsync_cloner = pfsync_cloner;
		V_pfsync_cloner_data = pfsync_cloner_data;
		V_pfsync_cloner.ifc_data = &V_pfsync_cloner_data;
		if_clone_attach(&V_pfsync_cloner);
		error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
		    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
		CURVNET_RESTORE();
		if (error)
			goto fail_locked;
	}
	VNET_LIST_RUNLOCK();
#ifdef INET
	error = pf_proto_register(PF_INET, &in_pfsync_protosw);
	if (error)
		goto fail;
	error = ipproto_register(IPPROTO_PFSYNC);
	if (error) {
		pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
		goto fail;
	}
#endif
	PF_LOCK();
	/* Publish the hook pointers that pf(4) calls into us through. */
	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_up_ptr = pfsync_up;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_state_in_use_ptr = pfsync_state_in_use;
	pfsync_defer_ptr = pfsync_defer;
	PF_UNLOCK();

	return (0);

fail:
	VNET_LIST_RLOCK();
fail_locked:
	/* Tear down whatever vnets were already set up. */
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		if (V_pfsync_swi_cookie) {
			swi_remove(V_pfsync_swi_cookie);
			if_clone_detach(&V_pfsync_cloner);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (error);
}

/*
 * Module unload: clear the pf hook pointers, unregister the protocol,
 * and detach the per-vnet cloner and software interrupt handler.
 */
static void
pfsync_uninit()
{
	VNET_ITERATOR_DECL(vnet_iter);

	PF_LOCK();
	pfsync_state_import_ptr = NULL;
	pfsync_up_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_state_in_use_ptr = NULL;
	pfsync_defer_ptr = NULL;
	PF_UNLOCK();

	ipproto_unregister(IPPROTO_PFSYNC);
	pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		swi_remove(V_pfsync_swi_cookie);
		if_clone_detach(&V_pfsync_cloner);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
}

/*
 * Standard module event handler.  Unload via MOD_QUIESCE is refused
 * (EPERM) because of race conditions; MOD_UNLOAD still tears down.
 */
static int
pfsync_modevent(module_t mod, int type, void *data)
{
	int error = 0;

	switch (type) {
	case MOD_LOAD:
		error = pfsync_init();
		break;
	case MOD_QUIESCE:
		/*
		 * Module should not be unloaded due to race conditions.
		 */
		error = EPERM;
		break;
	case MOD_UNLOAD:
		pfsync_uninit();
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

static moduledata_t pfsync_mod = {
	"pfsync",
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
#endif /* __FreeBSD__ */