/*
 * NOTE(review): the original two lines here ("Deleted Added", "full compact")
 * were diff-viewer chrome, not source text; replaced so the file parses.
 */
1/* $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ */
2
3/*
4 * Copyright (c) 2002 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45/*
46 * Revisions picked from OpenBSD after revision 1.110 import:
47 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
48 * 1.120, 1.175 - use monotonic time_uptime
49 * 1.122 - reduce number of updates for non-TCP sessions
50 * 1.170 - SIOCSIFMTU checks
51 */
52
53#ifdef __FreeBSD__
54#include "opt_inet.h"
55#include "opt_inet6.h"
56#include "opt_pf.h"
57
58#include <sys/cdefs.h>
59__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 229777 2012-01-07 14:39:45Z glebius $");
59__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 229850 2012-01-09 08:50:22Z glebius $");
60
61#define NBPFILTER 1
62
63#ifdef DEV_PFSYNC
64#define NPFSYNC DEV_PFSYNC
65#else
66#define NPFSYNC 0
67#endif
68#endif /* __FreeBSD__ */
69
70#include <sys/param.h>
71#include <sys/kernel.h>
72#ifdef __FreeBSD__
73#include <sys/bus.h>
74#include <sys/interrupt.h>
75#include <sys/priv.h>
76#endif
77#include <sys/proc.h>
78#include <sys/systm.h>
79#include <sys/time.h>
80#include <sys/mbuf.h>
81#include <sys/socket.h>
82#ifdef __FreeBSD__
83#include <sys/endian.h>
84#include <sys/malloc.h>
85#include <sys/module.h>
86#include <sys/sockio.h>
87#include <sys/taskqueue.h>
88#include <sys/lock.h>
89#include <sys/mutex.h>
90#include <sys/protosw.h>
91#else
92#include <sys/ioctl.h>
93#include <sys/timeout.h>
94#endif
95#include <sys/sysctl.h>
96#ifndef __FreeBSD__
97#include <sys/pool.h>
98#endif
99
100#include <net/if.h>
101#ifdef __FreeBSD__
102#include <net/if_clone.h>
103#endif
104#include <net/if_types.h>
105#include <net/route.h>
106#include <net/bpf.h>
107#include <net/netisr.h>
108#ifdef __FreeBSD__
109#include <net/vnet.h>
110#endif
111
112#include <netinet/in.h>
113#include <netinet/if_ether.h>
114#include <netinet/tcp.h>
115#include <netinet/tcp_seq.h>
116
117#ifdef INET
118#include <netinet/in_systm.h>
119#include <netinet/in_var.h>
120#include <netinet/ip.h>
121#include <netinet/ip_var.h>
122#endif
123
124#ifdef INET6
125#include <netinet6/nd6.h>
126#endif /* INET6 */
127
128#ifdef __FreeBSD__
129#include <netinet/ip_carp.h>
130#else
131#include "carp.h"
132#if NCARP > 0
133#include <netinet/ip_carp.h>
134#endif
135#endif
136
137#include <net/pfvar.h>
138#include <net/if_pfsync.h>
139
140#ifndef __FreeBSD__
141#include "bpfilter.h"
142#include "pfsync.h"
143#endif
144
/*
 * Smallest possible pfsync packet: IP header + pfsync header + one
 * subheader + the trailing EOF marker.  sc_len never drops below this.
 */
#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) + \
	sizeof(struct pfsync_eof))
150
151struct pfsync_pkt {
152 struct ip *ip;
153 struct in_addr src;
154 u_int8_t flags;
155};
156
157int pfsync_input_hmac(struct mbuf *, int);
158
159int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
160 struct pfsync_state_peer *);
161
162int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
163int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
164int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
165int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
166int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
167int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
168int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
169int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
170int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
171int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
172int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
173
174int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);
175
176int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
177 pfsync_in_clr, /* PFSYNC_ACT_CLR */
178 pfsync_in_ins, /* PFSYNC_ACT_INS */
179 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */
180 pfsync_in_upd, /* PFSYNC_ACT_UPD */
181 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */
182 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */
183 pfsync_in_del, /* PFSYNC_ACT_DEL */
184 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */
185 pfsync_in_error, /* PFSYNC_ACT_INS_F */
186 pfsync_in_error, /* PFSYNC_ACT_DEL_F */
187 pfsync_in_bus, /* PFSYNC_ACT_BUS */
188 pfsync_in_tdb, /* PFSYNC_ACT_TDB */
189 pfsync_in_eof /* PFSYNC_ACT_EOF */
190};
191
192struct pfsync_q {
193 int (*write)(struct pf_state *, struct mbuf *, int);
194 size_t len;
195 u_int8_t action;
196};
197
198/* we have one of these for every PFSYNC_S_ */
199int pfsync_out_state(struct pf_state *, struct mbuf *, int);
200int pfsync_out_iack(struct pf_state *, struct mbuf *, int);
201int pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
202int pfsync_out_del(struct pf_state *, struct mbuf *, int);
203
204struct pfsync_q pfsync_qs[] = {
205 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS },
206 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
207 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD },
208 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
209 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C }
210};
211
212void pfsync_q_ins(struct pf_state *, int);
213void pfsync_q_del(struct pf_state *);
214
215struct pfsync_upd_req_item {
216 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry;
217 struct pfsync_upd_req ur_msg;
218};
219TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);
220
221struct pfsync_deferral {
222 TAILQ_ENTRY(pfsync_deferral) pd_entry;
223 struct pf_state *pd_st;
224 struct mbuf *pd_m;
225#ifdef __FreeBSD__
226 struct callout pd_tmo;
227#else
228 struct timeout pd_tmo;
229#endif
230};
231TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);
232
233#define PFSYNC_PLSIZE MAX(sizeof(struct pfsync_upd_req_item), \
234 sizeof(struct pfsync_deferral))
235
236#ifdef notyet
237int pfsync_out_tdb(struct tdb *, struct mbuf *, int);
238#endif
239
240struct pfsync_softc {
241#ifdef __FreeBSD__
242 struct ifnet *sc_ifp;
243#else
244 struct ifnet sc_if;
245#endif
246 struct ifnet *sc_sync_if;
247
248#ifdef __FreeBSD__
249 uma_zone_t sc_pool;
250#else
251 struct pool sc_pool;
252#endif
253
254 struct ip_moptions sc_imo;
255
256 struct in_addr sc_sync_peer;
257 u_int8_t sc_maxupdates;
258#ifdef __FreeBSD__
259 int pfsync_sync_ok;
260#endif
261
262 struct ip sc_template;
263
264 struct pf_state_queue sc_qs[PFSYNC_S_COUNT];
265 size_t sc_len;
266
267 struct pfsync_upd_reqs sc_upd_req_list;
268
269 struct pfsync_deferrals sc_deferrals;
270 u_int sc_deferred;
271
272 void *sc_plus;
273 size_t sc_pluslen;
274
275 u_int32_t sc_ureq_sent;
276 int sc_bulk_tries;
277#ifdef __FreeBSD__
278 struct callout sc_bulkfail_tmo;
279#else
280 struct timeout sc_bulkfail_tmo;
281#endif
282
283 u_int32_t sc_ureq_received;
284 struct pf_state *sc_bulk_next;
285 struct pf_state *sc_bulk_last;
286#ifdef __FreeBSD__
287 struct callout sc_bulk_tmo;
288#else
289 struct timeout sc_bulk_tmo;
290#endif
291
292 TAILQ_HEAD(, tdb) sc_tdb_q;
293
294#ifdef __FreeBSD__
295 struct callout sc_tmo;
296#else
297 struct timeout sc_tmo;
298#endif
298#ifdef __FreeBSD__
299 eventhandler_tag sc_detachtag;
300#endif
301
299};
300
301#ifdef __FreeBSD__
302static MALLOC_DEFINE(M_PFSYNC, "pfsync", "pfsync data");
303static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
304#define V_pfsyncif VNET(pfsyncif)
307
305static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL;
306#define V_pfsync_swi_cookie VNET(pfsync_swi_cookie)
307static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
308#define V_pfsyncstats VNET(pfsyncstats)
309static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
310#define V_pfsync_carp_adj VNET(pfsync_carp_adj)
311
312static void pfsyncintr(void *);
313static int pfsync_multicast_setup(struct pfsync_softc *);
314static void pfsync_multicast_cleanup(struct pfsync_softc *);
315static int pfsync_init(void);
316static void pfsync_uninit(void);
317
318SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
319SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
320 &VNET_NAME(pfsyncstats), pfsyncstats,
321 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
322SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
323 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
324#else
325struct pfsync_softc *pfsyncif = NULL;
326struct pfsyncstats pfsyncstats;
327#define V_pfsyncstats pfsyncstats
328#endif
329
325#ifdef __FreeBSD__
326static void pfsyncintr(void *);
327struct pfsync_swi {
328 void * pfsync_swi_cookie;
329};
330static struct pfsync_swi pfsync_swi;
331#define schednetisr(p) swi_sched(pfsync_swi.pfsync_swi_cookie, 0)
332#define NETISR_PFSYNC
333#endif
334
330void pfsyncattach(int);
331#ifdef __FreeBSD__
332int pfsync_clone_create(struct if_clone *, int, caddr_t);
333void pfsync_clone_destroy(struct ifnet *);
334#else
335int pfsync_clone_create(struct if_clone *, int);
336int pfsync_clone_destroy(struct ifnet *);
337#endif
338int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
339 struct pf_state_peer *);
340void pfsync_update_net_tdb(struct pfsync_tdb *);
341int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
342#ifdef __FreeBSD__
343 struct route *);
344#else
345 struct rtentry *);
346#endif
347int pfsyncioctl(struct ifnet *, u_long, caddr_t);
348void pfsyncstart(struct ifnet *);
349
350struct mbuf *pfsync_if_dequeue(struct ifnet *);
351struct mbuf *pfsync_get_mbuf(struct pfsync_softc *);
352
353void pfsync_deferred(struct pf_state *, int);
354void pfsync_undefer(struct pfsync_deferral *, int);
355void pfsync_defer_tmo(void *);
356
357void pfsync_request_update(u_int32_t, u_int64_t);
358void pfsync_update_state_req(struct pf_state *);
359
360void pfsync_drop(struct pfsync_softc *);
361void pfsync_sendout(void);
362void pfsync_send_plus(void *, size_t);
363int pfsync_tdb_sendout(struct pfsync_softc *);
364int pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
365void pfsync_timeout(void *);
366void pfsync_tdb_timeout(void *);
367void pfsync_send_bus(struct pfsync_softc *, u_int8_t);
368
369void pfsync_bulk_start(void);
370void pfsync_bulk_status(u_int8_t);
371void pfsync_bulk_update(void *);
372void pfsync_bulk_fail(void *);
373
374#ifdef __FreeBSD__
380void pfsync_ifdetach(void *, struct ifnet *);
381
375/* XXX: ugly */
376#define betoh64 (unsigned long long)be64toh
377#define timeout_del callout_stop
378#endif
379
380#define PFSYNC_MAX_BULKTRIES 12
381#ifndef __FreeBSD__
382int pfsync_sync_ok;
383#endif
384
385#ifdef __FreeBSD__
386VNET_DEFINE(struct ifc_simple_data, pfsync_cloner_data);
387VNET_DEFINE(struct if_clone, pfsync_cloner);
388#define V_pfsync_cloner_data VNET(pfsync_cloner_data)
389#define V_pfsync_cloner VNET(pfsync_cloner)
390IFC_SIMPLE_DECLARE(pfsync, 1);
391#else
392struct if_clone pfsync_cloner =
393 IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
394#endif
395
396void
397pfsyncattach(int npfsync)
398{
399 if_clone_attach(&pfsync_cloner);
400}
401int
402#ifdef __FreeBSD__
403pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
404#else
405pfsync_clone_create(struct if_clone *ifc, int unit)
406#endif
407{
408 struct pfsync_softc *sc;
409 struct ifnet *ifp;
410 int q;
411
412 if (unit != 0)
413 return (EINVAL);
414
418#ifndef __FreeBSD__
415#ifdef __FreeBSD__
416 sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
417 sc->pfsync_sync_ok = 1;
418#else
419 pfsync_sync_ok = 1;
420 sc = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT | M_ZERO);
421#endif
422
422 sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
423 if (sc == NULL)
424 return (ENOMEM);
425
423 for (q = 0; q < PFSYNC_S_COUNT; q++)
424 TAILQ_INIT(&sc->sc_qs[q]);
425
426#ifdef __FreeBSD__
430 sc->pfsync_sync_ok = 1;
431 sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE,
432 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
433 if (sc->sc_pool == NULL) {
434 free(sc, M_DEVBUF);
435 return (ENOMEM);
436 }
427 sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE, NULL, NULL, NULL,
428 NULL, UMA_ALIGN_PTR, 0);
429#else
430 pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
431#endif
432 TAILQ_INIT(&sc->sc_upd_req_list);
433 TAILQ_INIT(&sc->sc_deferrals);
434 sc->sc_deferred = 0;
435
436 TAILQ_INIT(&sc->sc_tdb_q);
437
438 sc->sc_len = PFSYNC_MINPKT;
439 sc->sc_maxupdates = 128;
440
449#ifdef __FreeBSD__
441#ifndef __FreeBSD__
442 sc->sc_imo.imo_membership = (struct in_multi **)malloc(
451 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF,
452 M_NOWAIT | M_ZERO);
453 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
454 sc->sc_imo.imo_multicast_vif = -1;
455#else
456 sc->sc_imo.imo_membership = (struct in_multi **)malloc(
443 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
444 M_WAITOK | M_ZERO);
445 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
446#endif
447
448#ifdef __FreeBSD__
449 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
450 if (ifp == NULL) {
465 free(sc->sc_imo.imo_membership, M_DEVBUF);
451 uma_zdestroy(sc->sc_pool);
467 free(sc, M_DEVBUF);
452 free(sc, M_PFSYNC);
453 return (ENOSPC);
454 }
455 if_initname(ifp, ifc->ifc_name, unit);
471
472 sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event,
473#ifdef __FreeBSD__
474 pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY);
456#else
476 pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY);
477#endif
478 if (sc->sc_detachtag == NULL) {
479 if_free(ifp);
480 free(sc->sc_imo.imo_membership, M_DEVBUF);
481 uma_zdestroy(sc->sc_pool);
482 free(sc, M_DEVBUF);
483 return (ENOSPC);
484 }
485#else
457 ifp = &sc->sc_if;
458 snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
459#endif
460 ifp->if_softc = sc;
461 ifp->if_ioctl = pfsyncioctl;
462 ifp->if_output = pfsyncoutput;
463 ifp->if_start = pfsyncstart;
464 ifp->if_type = IFT_PFSYNC;
465 ifp->if_snd.ifq_maxlen = ifqmaxlen;
466 ifp->if_hdrlen = sizeof(struct pfsync_header);
467 ifp->if_mtu = ETHERMTU;
468#ifdef __FreeBSD__
469 callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
470 callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
471 callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
472#else
473 timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
474 timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
475 timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
476#endif
477
478 if_attach(ifp);
479#ifndef __FreeBSD__
480 if_alloc_sadl(ifp);
481
482#if NCARP > 0
483 if_addgroup(ifp, "carp");
484#endif
485#endif
486
487#if NBPFILTER > 0
488#ifdef __FreeBSD__
489 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
490#else
491 bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
492#endif
493#endif
494
495#ifdef __FreeBSD__
496 V_pfsyncif = sc;
497#else
498 pfsyncif = sc;
499#endif
500
501 return (0);
502}
503
504#ifdef __FreeBSD__
505void
506#else
507int
508#endif
509pfsync_clone_destroy(struct ifnet *ifp)
510{
511 struct pfsync_softc *sc = ifp->if_softc;
512
513#ifdef __FreeBSD__
543 EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag);
514 PF_LOCK();
515#endif
516 timeout_del(&sc->sc_bulkfail_tmo);
517 timeout_del(&sc->sc_bulk_tmo);
518 timeout_del(&sc->sc_tmo);
519#ifdef __FreeBSD__
520 PF_UNLOCK();
521 if (!sc->pfsync_sync_ok && carp_demote_adj_p)
522 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
523#else
524#if NCARP > 0
525 if (!pfsync_sync_ok)
526 carp_group_demote_adj(&sc->sc_if, -1);
527#endif
528#endif
529#if NBPFILTER > 0
530 bpfdetach(ifp);
531#endif
532 if_detach(ifp);
533
534 pfsync_drop(sc);
535
536 while (sc->sc_deferred > 0)
537 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
538
539#ifdef __FreeBSD__
540 UMA_DESTROY(sc->sc_pool);
541#else
542 pool_destroy(&sc->sc_pool);
543#endif
544#ifdef __FreeBSD__
545 if_free(ifp);
576 free(sc->sc_imo.imo_membership, M_DEVBUF);
546 if (sc->sc_imo.imo_membership)
547 pfsync_multicast_cleanup(sc);
548 free(sc, M_PFSYNC);
549#else
550 free(sc->sc_imo.imo_membership, M_IPMOPTS);
579#endif
551 free(sc, M_DEVBUF);
552#endif
553
554#ifdef __FreeBSD__
555 V_pfsyncif = NULL;
556#else
557 pfsyncif = NULL;
558#endif
559
560#ifndef __FreeBSD__
561 return (0);
562#endif
563}
564
565struct mbuf *
566pfsync_if_dequeue(struct ifnet *ifp)
567{
568 struct mbuf *m;
569#ifndef __FreeBSD__
570 int s;
571#endif
572
573#ifdef __FreeBSD__
574 IF_LOCK(&ifp->if_snd);
575 _IF_DROP(&ifp->if_snd);
576 _IF_DEQUEUE(&ifp->if_snd, m);
577 IF_UNLOCK(&ifp->if_snd);
578#else
579 s = splnet();
580 IF_DEQUEUE(&ifp->if_snd, m);
581 splx(s);
582#endif
583
584 return (m);
585}
586
587/*
588 * Start output on the pfsync interface.
589 */
590void
591pfsyncstart(struct ifnet *ifp)
592{
593 struct mbuf *m;
594
595 while ((m = pfsync_if_dequeue(ifp)) != NULL) {
596#ifndef __FreeBSD__
597 IF_DROP(&ifp->if_snd);
598#endif
599 m_freem(m);
600 }
601}
602
603int
604pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
605 struct pf_state_peer *d)
606{
607 if (s->scrub.scrub_flag && d->scrub == NULL) {
608#ifdef __FreeBSD__
609 d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
610#else
611 d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
612#endif
613 if (d->scrub == NULL)
614 return (ENOMEM);
615 }
616
617 return (0);
618}
619
620#ifndef __FreeBSD__
621void
622pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
623{
624 bzero(sp, sizeof(struct pfsync_state));
625
626 /* copy from state key */
627 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
628 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
629 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
630 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
631 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
632 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
633 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
634 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
635 sp->proto = st->key[PF_SK_WIRE]->proto;
636 sp->af = st->key[PF_SK_WIRE]->af;
637
638 /* copy from state */
639 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
640 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
641 sp->creation = htonl(time_uptime - st->creation);
642 sp->expire = pf_state_expires(st);
643 if (sp->expire <= time_second)
644 sp->expire = htonl(0);
645 else
646 sp->expire = htonl(sp->expire - time_second);
647
648 sp->direction = st->direction;
649 sp->log = st->log;
650 sp->timeout = st->timeout;
651 sp->state_flags = st->state_flags;
652 if (st->src_node)
653 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
654 if (st->nat_src_node)
655 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
656
657 bcopy(&st->id, &sp->id, sizeof(sp->id));
658 sp->creatorid = st->creatorid;
659 pf_state_peer_hton(&st->src, &sp->src);
660 pf_state_peer_hton(&st->dst, &sp->dst);
661
662 if (st->rule.ptr == NULL)
663 sp->rule = htonl(-1);
664 else
665 sp->rule = htonl(st->rule.ptr->nr);
666 if (st->anchor.ptr == NULL)
667 sp->anchor = htonl(-1);
668 else
669 sp->anchor = htonl(st->anchor.ptr->nr);
670 if (st->nat_rule.ptr == NULL)
671 sp->nat_rule = htonl(-1);
672 else
673 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
674
675 pf_state_counter_hton(st->packets[0], sp->packets[0]);
676 pf_state_counter_hton(st->packets[1], sp->packets[1]);
677 pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
678 pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
679
680}
681#endif
682
683int
684pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
685{
686 struct pf_state *st = NULL;
687 struct pf_state_key *skw = NULL, *sks = NULL;
688 struct pf_rule *r = NULL;
689 struct pfi_kif *kif;
690 int pool_flags;
691 int error;
692
693 PF_LOCK_ASSERT();
694
695#ifdef __FreeBSD__
696 if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
697#else
698 if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
699#endif
700 printf("pfsync_state_import: invalid creator id:"
701 " %08x\n", ntohl(sp->creatorid));
702 return (EINVAL);
703 }
704
705 if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
706#ifdef __FreeBSD__
707 if (V_pf_status.debug >= PF_DEBUG_MISC)
708#else
709 if (pf_status.debug >= PF_DEBUG_MISC)
710#endif
711 printf("pfsync_state_import: "
712 "unknown interface: %s\n", sp->ifname);
713 if (flags & PFSYNC_SI_IOCTL)
714 return (EINVAL);
715 return (0); /* skip this state */
716 }
717
718 /*
719 * If the ruleset checksums match or the state is coming from the ioctl,
720 * it's safe to associate the state with the rule of that number.
721 */
722 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
723 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
724 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
725 r = pf_main_ruleset.rules[
726 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
727 else
728#ifdef __FreeBSD__
729 r = &V_pf_default_rule;
730#else
731 r = &pf_default_rule;
732#endif
733
734 if ((r->max_states && r->states_cur >= r->max_states))
735 goto cleanup;
736
737#ifdef __FreeBSD__
738 if (flags & PFSYNC_SI_IOCTL)
739 pool_flags = PR_WAITOK | PR_ZERO;
740 else
741 pool_flags = PR_NOWAIT | PR_ZERO;
742
743 if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)
744 goto cleanup;
745#else
746 if (flags & PFSYNC_SI_IOCTL)
747 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
748 else
749 pool_flags = PR_LIMITFAIL | PR_ZERO;
750
751 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
752 goto cleanup;
753#endif
754
755 if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
756 goto cleanup;
757
758 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
759 &sp->key[PF_SK_STACK].addr[0], sp->af) ||
760 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
761 &sp->key[PF_SK_STACK].addr[1], sp->af) ||
762 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
763 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
764 if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
765 goto cleanup;
766 } else
767 sks = skw;
768
769 /* allocate memory for scrub info */
770 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
771 pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
772 goto cleanup;
773
774 /* copy to state key(s) */
775 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
776 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
777 skw->port[0] = sp->key[PF_SK_WIRE].port[0];
778 skw->port[1] = sp->key[PF_SK_WIRE].port[1];
779 skw->proto = sp->proto;
780 skw->af = sp->af;
781 if (sks != skw) {
782 sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
783 sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
784 sks->port[0] = sp->key[PF_SK_STACK].port[0];
785 sks->port[1] = sp->key[PF_SK_STACK].port[1];
786 sks->proto = sp->proto;
787 sks->af = sp->af;
788 }
789
790 /* copy to state */
791 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
792 st->creation = time_uptime - ntohl(sp->creation);
793 st->expire = time_second;
794 if (sp->expire) {
795 /* XXX No adaptive scaling. */
796 st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
797 }
798
799 st->expire = ntohl(sp->expire) + time_second;
800 st->direction = sp->direction;
801 st->log = sp->log;
802 st->timeout = sp->timeout;
803 st->state_flags = sp->state_flags;
804
805 bcopy(sp->id, &st->id, sizeof(st->id));
806 st->creatorid = sp->creatorid;
807 pf_state_peer_ntoh(&sp->src, &st->src);
808 pf_state_peer_ntoh(&sp->dst, &st->dst);
809
810 st->rule.ptr = r;
811 st->nat_rule.ptr = NULL;
812 st->anchor.ptr = NULL;
813 st->rt_kif = NULL;
814
815 st->pfsync_time = time_uptime;
816 st->sync_state = PFSYNC_S_NONE;
817
818 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
819 r->states_cur++;
820 r->states_tot++;
821
822 if (!ISSET(flags, PFSYNC_SI_IOCTL))
823 SET(st->state_flags, PFSTATE_NOSYNC);
824
825 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
826 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
827 r->states_cur--;
828 goto cleanup_state;
829 }
830
831 if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
832 CLR(st->state_flags, PFSTATE_NOSYNC);
833 if (ISSET(st->state_flags, PFSTATE_ACK)) {
834 pfsync_q_ins(st, PFSYNC_S_IACK);
835#ifdef __FreeBSD__
836 pfsync_sendout();
837#else
838 schednetisr(NETISR_PFSYNC);
839#endif
840 }
841 }
842 CLR(st->state_flags, PFSTATE_ACK);
843
844 return (0);
845
846cleanup:
847 error = ENOMEM;
848 if (skw == sks)
849 sks = NULL;
850#ifdef __FreeBSD__
851 if (skw != NULL)
852 pool_put(&V_pf_state_key_pl, skw);
853 if (sks != NULL)
854 pool_put(&V_pf_state_key_pl, sks);
855#else
856 if (skw != NULL)
857 pool_put(&pf_state_key_pl, skw);
858 if (sks != NULL)
859 pool_put(&pf_state_key_pl, sks);
860#endif
861
862cleanup_state: /* pf_state_insert frees the state keys */
863 if (st) {
864#ifdef __FreeBSD__
865 if (st->dst.scrub)
866 pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
867 if (st->src.scrub)
868 pool_put(&V_pf_state_scrub_pl, st->src.scrub);
869 pool_put(&V_pf_state_pl, st);
870#else
871 if (st->dst.scrub)
872 pool_put(&pf_state_scrub_pl, st->dst.scrub);
873 if (st->src.scrub)
874 pool_put(&pf_state_scrub_pl, st->src.scrub);
875 pool_put(&pf_state_pl, st);
876#endif
877 }
878 return (error);
879}
880
881void
882#ifdef __FreeBSD__
883pfsync_input(struct mbuf *m, __unused int off)
884#else
885pfsync_input(struct mbuf *m, ...)
886#endif
887{
888#ifdef __FreeBSD__
889 struct pfsync_softc *sc = V_pfsyncif;
890#else
891 struct pfsync_softc *sc = pfsyncif;
892#endif
893 struct pfsync_pkt pkt;
894 struct ip *ip = mtod(m, struct ip *);
895 struct pfsync_header *ph;
896 struct pfsync_subheader subh;
897
898 int offset;
899 int rv;
900
901 V_pfsyncstats.pfsyncs_ipackets++;
902
903 /* verify that we have a sync interface configured */
904#ifdef __FreeBSD__
905 if (!sc || !sc->sc_sync_if || !V_pf_status.running)
906#else
907 if (!sc || !sc->sc_sync_if || !pf_status.running)
908#endif
909 goto done;
910
911 /* verify that the packet came in on the right interface */
912 if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
913 V_pfsyncstats.pfsyncs_badif++;
914 goto done;
915 }
916
917#ifdef __FreeBSD__
918 sc->sc_ifp->if_ipackets++;
919 sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
920#else
921 sc->sc_if.if_ipackets++;
922 sc->sc_if.if_ibytes += m->m_pkthdr.len;
923#endif
924 /* verify that the IP TTL is 255. */
925 if (ip->ip_ttl != PFSYNC_DFLTTL) {
926 V_pfsyncstats.pfsyncs_badttl++;
927 goto done;
928 }
929
930 offset = ip->ip_hl << 2;
931 if (m->m_pkthdr.len < offset + sizeof(*ph)) {
932 V_pfsyncstats.pfsyncs_hdrops++;
933 goto done;
934 }
935
936 if (offset + sizeof(*ph) > m->m_len) {
937 if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
938 V_pfsyncstats.pfsyncs_hdrops++;
939 return;
940 }
941 ip = mtod(m, struct ip *);
942 }
943 ph = (struct pfsync_header *)((char *)ip + offset);
944
945 /* verify the version */
946 if (ph->version != PFSYNC_VERSION) {
947 V_pfsyncstats.pfsyncs_badver++;
948 goto done;
949 }
950
951#if 0
952 if (pfsync_input_hmac(m, offset) != 0) {
953 /* XXX stats */
954 goto done;
955 }
956#endif
957
958 /* Cheaper to grab this now than having to mess with mbufs later */
959 pkt.ip = ip;
960 pkt.src = ip->ip_src;
961 pkt.flags = 0;
962
963#ifdef __FreeBSD__
964 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
965#else
966 if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
967#endif
968 pkt.flags |= PFSYNC_SI_CKSUM;
969
970 offset += sizeof(*ph);
971 for (;;) {
972 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
973 offset += sizeof(subh);
974
975 if (subh.action >= PFSYNC_ACT_MAX) {
976 V_pfsyncstats.pfsyncs_badact++;
977 goto done;
978 }
979
980 rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
981 ntohs(subh.count));
982 if (rv == -1)
983 return;
984
985 offset += rv;
986 }
987
988done:
989 m_freem(m);
990}
991
992int
993pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
994{
995 struct pfsync_clr *clr;
996 struct mbuf *mp;
997 int len = sizeof(*clr) * count;
998 int i, offp;
999
1000 struct pf_state *st, *nexts;
1001 struct pf_state_key *sk, *nextsk;
1002 struct pf_state_item *si;
1003 u_int32_t creatorid;
1004 int s;
1005
1006 mp = m_pulldown(m, offset, len, &offp);
1007 if (mp == NULL) {
1008 V_pfsyncstats.pfsyncs_badlen++;
1009 return (-1);
1010 }
1011 clr = (struct pfsync_clr *)(mp->m_data + offp);
1012
1013 s = splsoftnet();
1014#ifdef __FreeBSD__
1015 PF_LOCK();
1016#endif
1017 for (i = 0; i < count; i++) {
1018 creatorid = clr[i].creatorid;
1019
1020 if (clr[i].ifname[0] == '\0') {
1021#ifdef __FreeBSD__
1022 for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
1023 st; st = nexts) {
1024 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
1025#else
1026 for (st = RB_MIN(pf_state_tree_id, &tree_id);
1027 st; st = nexts) {
1028 nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
1029#endif
1030 if (st->creatorid == creatorid) {
1031 SET(st->state_flags, PFSTATE_NOSYNC);
1032 pf_unlink_state(st);
1033 }
1034 }
1035 } else {
1036 if (pfi_kif_get(clr[i].ifname) == NULL)
1037 continue;
1038
1039 /* XXX correct? */
1040#ifdef __FreeBSD__
1041 for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
1042#else
1043 for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
1044#endif
1045 sk; sk = nextsk) {
1046 nextsk = RB_NEXT(pf_state_tree,
1047#ifdef __FreeBSD__
1048 &V_pf_statetbl, sk);
1049#else
1050 &pf_statetbl, sk);
1051#endif
1052 TAILQ_FOREACH(si, &sk->states, entry) {
1053 if (si->s->creatorid == creatorid) {
1054 SET(si->s->state_flags,
1055 PFSTATE_NOSYNC);
1056 pf_unlink_state(si->s);
1057 }
1058 }
1059 }
1060 }
1061 }
1062#ifdef __FreeBSD__
1063 PF_UNLOCK();
1064#endif
1065 splx(s);
1066
1067 return (len);
1068}
1069
/*
 * PFSYNC_ACT_INS: the peer inserted new states.  Validate each message
 * and import it into the local state table.  Returns the number of
 * bytes consumed, or -1 if the region could not be made contiguous.
 */
int
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;
	int i, offp;

	int s;

	/* Get a contiguous view of all "count" state messages. */
	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC5_ACT_INS: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
			/* drop out, but process the rest of the actions */
			break;
		}
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1124
/*
 * PFSYNC_ACT_INS_ACK: the peer acknowledged states we inserted.  For
 * each acked state still flagged PFSTATE_ACK, release its deferral via
 * pfsync_deferred(st, 0).
 */
int
pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_state_cmp id_key;
	struct pf_state *st;

	struct mbuf *mp;
	int len = count * sizeof(*ia);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		/* Look the state up by id + creatorid. */
		bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ia->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL)
			continue;

		/*
		 * Second argument 0 here vs. 1 in the update handlers --
		 * presumably "do not drop the held packet"; confirm against
		 * pfsync_deferred().
		 */
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 0);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
	/*
	 * NOTE(review): an older comment here claimed this handler was "not
	 * yet implemented" -- the loop above does process the acks; only the
	 * byte count below is returned to the dispatcher.
	 */

	return (count * sizeof(struct pfsync_ins_ack));
}
1172
1173int
1174pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
1175 struct pfsync_state_peer *dst)
1176{
1177 int sfail = 0;
1178
1179 /*
1180 * The state should never go backwards except
1181 * for syn-proxy states. Neither should the
1182 * sequence window slide backwards.
1183 */
1184 if (st->src.state > src->state &&
1185 (st->src.state < PF_TCPS_PROXY_SRC ||
1186 src->state >= PF_TCPS_PROXY_SRC))
1187 sfail = 1;
1188 else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
1189 sfail = 3;
1190 else if (st->dst.state > dst->state) {
1191 /* There might still be useful
1192 * information about the src state here,
1193 * so import that part of the update,
1194 * then "fail" so we send the updated
1195 * state back to the peer who is missing
1196 * our what we know. */
1197 pf_state_peer_ntoh(src, &st->src);
1198 /* XXX do anything with timeouts? */
1199 sfail = 7;
1200 } else if (st->dst.state >= TCPS_SYN_SENT &&
1201 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
1202 sfail = 4;
1203
1204 return (sfail);
1205}
1206
/*
 * PFSYNC_ACT_UPD: full state updates from the peer.  Unknown states are
 * imported; known TCP states are sanity checked via pfsync_upd_tcp(),
 * and stale updates trigger a counter-update back to the sender.
 */
int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int sfail;

	struct mbuf *mp;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		/* Look the state up by id + creatorid. */
		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go
			 * forwards
			 */
			if (st->src.state > sp->src.state)
				sfail = 5;
			else if (st->dst.state > sp->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: %s stale update (%d)"
				    " id: %016llx creatorid: %08x\n",
				    (sfail < 7 ? "ignoring" : "partial"),
				    sfail, betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			/* Push our fresher copy of the state to the peer. */
			pfsync_update_state(st);
#ifdef __FreeBSD__
			pfsync_sendout();
#else
			schednetisr(NETISR_PFSYNC);
#endif
			continue;
		}
		/* Accept the update: copy peer info and refresh expiry. */
		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
		pf_state_peer_ntoh(&sp->src, &st->src);
		pf_state_peer_ntoh(&sp->dst, &st->dst);
		st->expire = ntohl(sp->expire) + time_second;
		st->timeout = sp->timeout;
		st->pfsync_time = time_uptime;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1316
/*
 * PFSYNC_ACT_UPD_C: compressed state updates (id, creatorid and peer
 * sequencing info only).  States we do not know are requested in full
 * from the peer; known states are sanity checked like full updates.
 */
int
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_c *ua, *up;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pf_state *st;

	int len = count * sizeof(*up);
	int sfail;

	struct mbuf *mp;
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = up->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			pfsync_request_update(id_key.creatorid, id_key.id);
			continue;
		}

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go forwards
			 */
			if (st->src.state > up->src.state)
				sfail = 5;
			else if (st->dst.state > up->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: ignoring stale update "
				    "(%d) id: %016llx "
				    "creatorid: %08x\n", sfail,
				    betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			/* Push our fresher copy of the state to the peer. */
			pfsync_update_state(st);
#ifdef __FreeBSD__
			pfsync_sendout();
#else
			schednetisr(NETISR_PFSYNC);
#endif
			continue;
		}
		/* Accept the update: copy peer info and refresh expiry. */
		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
		pf_state_peer_ntoh(&up->src, &st->src);
		pf_state_peer_ntoh(&up->dst, &st->dst);
		st->expire = ntohl(up->expire) + time_second;
		st->timeout = up->timeout;
		st->pfsync_time = time_uptime;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1426
/*
 * PFSYNC_ACT_UPD_REQ: the peer asked us for updates.  An all-zero
 * id/creatorid requests a full bulk transfer; otherwise queue an
 * update for each named state we still hold (unless it is NOSYNC).
 *
 * NOTE(review): unlike the sibling input handlers this one never raises
 * splsoftnet() and calls PF_LOCK() without an #ifdef __FreeBSD__ guard
 * -- confirm this asymmetry is intentional.
 */
int
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_req *ur, *ura;
	struct mbuf *mp;
	int len = count * sizeof(*ur);
	int i, offp;

	struct pf_state_cmp id_key;
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ur = &ura[i];

		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ur->creatorid;

		/* A zero key means "send me everything". */
		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			if (ISSET(st->state_flags, PFSTATE_NOSYNC))
				continue;

			PF_LOCK();
			pfsync_update_state_req(st);
			PF_UNLOCK();
		}
	}

	return (len);
}
1470
1471int
1472pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1473{
1474 struct mbuf *mp;
1475 struct pfsync_state *sa, *sp;
1476 struct pf_state_cmp id_key;
1477 struct pf_state *st;
1478 int len = count * sizeof(*sp);
1479 int offp, i;
1480 int s;
1481
1482 mp = m_pulldown(m, offset, len, &offp);
1483 if (mp == NULL) {
1484 V_pfsyncstats.pfsyncs_badlen++;
1485 return (-1);
1486 }
1487 sa = (struct pfsync_state *)(mp->m_data + offp);
1488
1489 s = splsoftnet();
1490#ifdef __FreeBSD__
1491 PF_LOCK();
1492#endif
1493 for (i = 0; i < count; i++) {
1494 sp = &sa[i];
1495
1496 bcopy(sp->id, &id_key.id, sizeof(id_key.id));
1497 id_key.creatorid = sp->creatorid;
1498
1499 st = pf_find_state_byid(&id_key);
1500 if (st == NULL) {
1501 V_pfsyncstats.pfsyncs_badstate++;
1502 continue;
1503 }
1504 SET(st->state_flags, PFSTATE_NOSYNC);
1505 pf_unlink_state(st);
1506 }
1507#ifdef __FreeBSD__
1508 PF_UNLOCK();
1509#endif
1510 splx(s);
1511
1512 return (len);
1513}
1514
1515int
1516pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1517{
1518 struct mbuf *mp;
1519 struct pfsync_del_c *sa, *sp;
1520 struct pf_state_cmp id_key;
1521 struct pf_state *st;
1522 int len = count * sizeof(*sp);
1523 int offp, i;
1524 int s;
1525
1526 mp = m_pulldown(m, offset, len, &offp);
1527 if (mp == NULL) {
1528 V_pfsyncstats.pfsyncs_badlen++;
1529 return (-1);
1530 }
1531 sa = (struct pfsync_del_c *)(mp->m_data + offp);
1532
1533 s = splsoftnet();
1534#ifdef __FreeBSD__
1535 PF_LOCK();
1536#endif
1537 for (i = 0; i < count; i++) {
1538 sp = &sa[i];
1539
1540 bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
1541 id_key.creatorid = sp->creatorid;
1542
1543 st = pf_find_state_byid(&id_key);
1544 if (st == NULL) {
1545 V_pfsyncstats.pfsyncs_badstate++;
1546 continue;
1547 }
1548
1549 SET(st->state_flags, PFSTATE_NOSYNC);
1550 pf_unlink_state(st);
1551 }
1552#ifdef __FreeBSD__
1553 PF_UNLOCK();
1554#endif
1555 splx(s);
1556
1557 return (len);
1558}
1559
/*
 * PFSYNC_ACT_BUS: bulk update status messages.  On "start", arm the
 * bulk-failure timeout scaled to the state-table limit and packet
 * capacity; on a valid "end" (timestamp not older than our request),
 * declare ourselves in sync and lift the carp demotion.
 */
int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)
		return (len);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
		/* Allow 4s plus one tick per state the peer may send. */
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
		    V_pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)),
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
		    sizeof(struct pfsync_state)));
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			/*
			 * NOTE(review): timeout_del() is called on both
			 * platforms here -- presumably a compat wrapper
			 * exists on FreeBSD; confirm.
			 */
			timeout_del(&sc->sc_bulkfail_tmo);
#ifdef __FreeBSD__
			if (!sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
				    "pfsync bulk done");
			sc->pfsync_sync_ok = 1;
#else
#if NCARP > 0
			if (!pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, -1);
#endif
			pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}

	return (len);
}
1646
/*
 * PFSYNC_ACT_TDB: IPsec SA (tdb) replay/byte-counter updates.  When the
 * kernel is built without IPSEC this degenerates to a length-skip.
 */
int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
#endif

	return (len);
}
1680
#if defined(IPSEC)
/*
 * Update an in-kernel tdb. Silently fail if no tdb is found.  Rejects
 * reserved SPIs, non-INET/INET6 families, and any update that would
 * move the replay or byte counters backwards.
 */
void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb *tdb;
	int s;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	    pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	s = spltdb();
	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		/* Convert to host order in place before comparing. */
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			splx(s);
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	splx(s);
	return;

bad:
#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		/*
		 * NOTE(review): the "pfsync_insert" prefix in this message
		 * looks stale -- this is the tdb update path.
		 */
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
#endif
1726
1727
1728int
1729pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1730{
1731 /* check if we are at the right place in the packet */
1732 if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
1733 V_pfsyncstats.pfsyncs_badact++;
1734
1735 /* we're done. free and let the caller return */
1736 m_freem(m);
1737 return (-1);
1738}
1739
1740int
1741pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1742{
1743 V_pfsyncstats.pfsyncs_badact++;
1744
1745 m_freem(m);
1746 return (-1);
1747}
1748
/*
 * if_output for the pfsync pseudo-interface.  pfsync builds its own
 * packets internally, so anything handed to us through the generic
 * output path is simply discarded (and reported as success).
 */
int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
#ifdef __FreeBSD__
	struct route *rt)
#else
	struct rtentry *rt)
#endif
{
	m_freem(m);
	return (0);
}
1760
/*
 * ioctl handler for the pfsync interface: interface flags/MTU plus the
 * pfsync-specific SIOCGETPFSYNC/SIOCSETPFSYNC requests that configure
 * the sync device, sync peer and maxupdates, join/leave the pfsync
 * multicast group, prime the outgoing IP header template, and kick off
 * a bulk update when a sync interface is (re)configured.
 *
 * NOTE(review): parts of the SIOCSETPFSYNC branch below appear
 * merge-damaged -- old and new hunks are interleaved, leaving unmatched
 * #ifdef/#else pairs and duplicated statements.  The code is preserved
 * as found; the damaged spots are flagged inline and need manual
 * reconciliation against the upstream revisions.
 */
/* ARGSUSED */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#ifndef __FreeBSD__
	struct proc *p = curproc;
#endif
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet *sifp;
	struct ip *ip;
	int s, error;

	switch (cmd) {
#if 0
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
#endif
	case SIOCSIFFLAGS:
#ifdef __FreeBSD__
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
		else
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#else
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
#endif
		break;
	case SIOCSIFMTU:
		if (!sc->sc_sync_if ||
		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
			return (EINVAL);
		if (ifr->ifr_mtu < ifp->if_mtu) {
			/* Flush queued messages before shrinking the MTU. */
			s = splnet();
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			pfsync_sendout();
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			splx(s);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
#ifdef __FreeBSD__
		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
#else
		if ((error = suser(p, p->p_acflag)) != 0)
#endif
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		/* Default peer is the pfsync multicast group. */
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
#ifdef __FreeBSD__
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
#endif
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
#ifdef __FreeBSD__
		{
			PF_UNLOCK();
#endif
			return (EINVAL);
#ifdef __FreeBSD__
		}
#endif
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if (imo->imo_membership)
				pfsync_multicast_cleanup(sc);
			/*
			 * NOTE(review): the #endif above and the #else below
			 * do not pair up -- this hunk looks merge-damaged.
			 */
#else
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
#endif
			break;
		}

#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		s = splnet();
		/* Flush first if the new device forces a smaller packet. */
#ifdef __FreeBSD__
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
#else
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
#endif
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout();
		sc->sc_sync_if = sifp;

		/*
		 * NOTE(review): the membership-teardown lines below appear to
		 * interleave two versions of the same hunk (duplicated
		 * conditions, unbalanced braces/#ifdefs) -- merge-damaged.
		 */
		if (imo->imo_num_memberships > 0) {
#ifdef __FreeBSD__
		if (imo->imo_membership) {
			PF_UNLOCK();
#endif
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
#ifdef __FreeBSD__
			pfsync_multicast_cleanup(sc);
			PF_LOCK();
#endif
		}
#else
		if (imo->imo_num_memberships > 0) {
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
			imo->imo_multicast_ifp = NULL;
		}
#endif

		/*
		 * NOTE(review): the truncated "if (sc->sc_sync_if &&" line
		 * below duplicates the condition that follows -- also part of
		 * the damaged merge.
		 */
		if (sc->sc_sync_if &&
#ifdef __FreeBSD__
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
			PF_UNLOCK();
			error = pfsync_multicast_setup(sc);
			if (error)
				return (error);
			PF_LOCK();
		}
#else
		if (sc->sc_sync_if &&
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
#endif
			struct in_addr addr;

			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
				PF_UNLOCK();
#endif
				splx(s);
				return (EADDRNOTAVAIL);
			}

#ifdef __FreeBSD__
			addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			addr.s_addr = INADDR_PFSYNC_GROUP;
#endif

#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;
				splx(s);
				return (ENOBUFS);
			}
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}
#endif /* !__FreeBSD__ */

		/* Prime the IP header template used for every packet. */
		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
#ifdef __FreeBSD__
		ip->ip_off = IP_DF;
#else
		ip->ip_off = htons(IP_DF);
#endif
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
#ifdef __FreeBSD__
			if (sc->pfsync_sync_ok && carp_demote_adj_p)
				(*carp_demote_adj_p)(V_pfsync_carp_adj,
				    "pfsync bulk start");
			sc->pfsync_sync_ok = 0;
#else
#if NCARP > 0
			if (pfsync_sync_ok)
				carp_group_demote_adj(&sc->sc_if, 1);
#endif
			pfsync_sync_ok = 0;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: requesting bulk update\n");
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
			    pfsync_bulk_fail, V_pfsyncif);
#else
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
			pfsync_request_update(0, 0);
		}
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		splx(s);

		break;

	default:
		return (ENOTTY);
	}

	return (0);
}
2000
2001int
2002pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
2003{
2004 struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);
2005
2006 pfsync_state_export(sp, st);
2007
2008 return (sizeof(*sp));
2009}
2010
2011int
2012pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
2013{
2014 struct pfsync_ins_ack *iack =
2015 (struct pfsync_ins_ack *)(m->m_data + offset);
2016
2017 iack->id = st->id;
2018 iack->creatorid = st->creatorid;
2019
2020 return (sizeof(*iack));
2021}
2022
2023int
2024pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
2025{
2026 struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);
2027
2028 up->id = st->id;
2029 pf_state_peer_hton(&st->src, &up->src);
2030 pf_state_peer_hton(&st->dst, &up->dst);
2031 up->creatorid = st->creatorid;
2032
2033 up->expire = pf_state_expires(st);
2034 if (up->expire <= time_second)
2035 up->expire = htonl(0);
2036 else
2037 up->expire = htonl(up->expire - time_second);
2038 up->timeout = st->timeout;
2039
2040 bzero(up->_pad, sizeof(up->_pad)); /* XXX */
2041
2042 return (sizeof(*up));
2043}
2044
2045int
2046pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
2047{
2048 struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);
2049
2050 dp->id = st->id;
2051 dp->creatorid = st->creatorid;
2052
2053 SET(st->state_flags, PFSTATE_NOSYNC);
2054
2055 return (sizeof(*dp));
2056}
2057
/*
 * Discard everything queued for the next pfsync packet: unhook all
 * states from their per-action queues, throw away pending update
 * requests and any custom ("plus") region, and reset the pending packet
 * length to the bare header.  Used when the packet cannot be built or
 * sent.
 */
void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
				("%s: st->sync_state == q",
					__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif
			st->sync_state = PFSYNC_S_NONE;
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);
	}

	sc->sc_plus = NULL;

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);

		TAILQ_INIT(&sc->sc_tdb_q);
	}
#endif

	sc->sc_len = PFSYNC_MINPKT;
}
2105
/*
 * Build and transmit the pending pfsync packet: allocate an mbuf big
 * enough for everything queued, lay down the IP and pfsync headers,
 * serialize each per-action queue (subheader + records), append queued
 * update requests and any custom region, terminate with an EOF
 * subheader, tap bpf, and hand the packet to the output path.  Resets
 * sc_len to PFSYNC_MINPKT for the next round.
 */
void
pfsync_sendout(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
#if NBPFILTER > 0
#ifdef __FreeBSD__
	struct ifnet *ifp = sc->sc_ifp;
#else
	struct ifnet *ifp = &sc->sc_if;
#endif
#endif
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
#ifdef __FreeBSD__
	size_t pktlen;
	int dummy_error;
#endif
	int offset;
	int q, count = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_NET);
#endif

	/* Nothing pending beyond the bare header -> nothing to do. */
	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
		return;

#if NBPFILTER > 0
	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
#else
	if (sc->sc_sync_if == NULL) {
#endif
		pfsync_drop(sc);
		return;
	}

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
#ifdef __FreeBSD__
		sc->sc_ifp->if_oerrors++;
#else
		sc->sc_if.if_oerrors++;
#endif
		V_pfsyncstats.pfsyncs_onomem++;
		pfsync_drop(sc);
		return;
	}

	/* Attach external storage when the packet won't fit in an mbuf. */
#ifdef __FreeBSD__
	pktlen = max_linkhdr + sc->sc_len;
	if (pktlen > MHLEN) {
		/* Find the right pool to allocate from. */
		/* XXX: This is ugly. */
		m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE :
		    pktlen <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
#else
	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
#endif
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
#ifdef __FreeBSD__
			sc->sc_ifp->if_oerrors++;
#else
			sc->sc_if.if_oerrors++;
#endif
			V_pfsyncstats.pfsyncs_onomem++;
			pfsync_drop(sc);
			return;
		}
	}
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

#ifdef __FreeBSD__
	ip->ip_len = m->m_pkthdr.len;
#else
	ip->ip_len = htons(m->m_pkthdr.len);
#endif
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
#ifdef __FreeBSD__
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#else
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#endif

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
			    ("%s: st->sync_state == q",
				__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif

			/* Per-queue serializer writes the record. */
			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		/* Subheader is filled in after the count is known. */
		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			pool_put(&sc->sc_pool, ur);

			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

		sc->sc_plus = NULL;
	}

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);

			count++;
		}
		TAILQ_INIT(&sc->sc_tdb_q);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);
	}
#endif

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);

	/* XXX write checksum in EOF here */

	/* we're done, let's put it on the wire */
#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/* Tap the pfsync payload only (skip the IP header). */
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
#ifdef __FreeBSD__
		BPF_MTAP(ifp, m);
#else
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}

	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}
#endif

#ifdef __FreeBSD__
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
	sc->sc_len = PFSYNC_MINPKT;

	IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
	/*
	 * NOTE(review): schednetisr() inside this __FreeBSD__ branch looks
	 * like a leftover from a merge -- swi_sched() below should be the
	 * only kick; confirm against upstream.
	 */
	schednetisr(NETISR_PFSYNC);
	swi_sched(V_pfsync_swi_cookie, 0);
#else
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;

	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
		pfsyncstats.pfsyncs_opackets++;
	else
		pfsyncstats.pfsyncs_oerrors++;

	/* start again */
	sc->sc_len = PFSYNC_MINPKT;
#endif
}
2353
/*
 * Hook called when pf creates a new state: queue an insert message for
 * the peer unless the rule or the state itself opts out of syncing
 * (PFRULE_NOSYNC, pfsync's own traffic, or PFSTATE_NOSYNC).
 */
void
pfsync_insert_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	/* Never sync states the rule excludes or pfsync's own states. */
	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
		return;
	}

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
		return;

#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif
#endif

	/* First message in a fresh packet: arm the 1-second flush timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	pfsync_q_ins(st, PFSYNC_S_INS);

	/* ACK-deferred states are pushed out immediately. */
	if (ISSET(st->state_flags, PFSTATE_ACK))
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	else
		st->sync_updates = 0;
}
2406
2407int defer = 10;
2408
2409int
2410pfsync_defer(struct pf_state *st, struct mbuf *m)
2411{
2412#ifdef __FreeBSD__
2413 struct pfsync_softc *sc = V_pfsyncif;
2414#else
2415 struct pfsync_softc *sc = pfsyncif;
2416#endif
2417 struct pfsync_deferral *pd;
2418
2419#ifdef __FreeBSD__
2420 PF_LOCK_ASSERT();
2421#else
2422 splassert(IPL_SOFTNET);
2423#endif
2424
2425 if (sc->sc_deferred >= 128)
2426 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
2427
2428 pd = pool_get(&sc->sc_pool, M_NOWAIT);
2429 if (pd == NULL)
2430 return (0);
2431 sc->sc_deferred++;
2432
2433#ifdef __FreeBSD__
2434 m->m_flags |= M_SKIP_FIREWALL;
2435#else
2436 m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2437#endif
2438 SET(st->state_flags, PFSTATE_ACK);
2439
2440 pd->pd_st = st;
2441 pd->pd_m = m;
2442
2443 TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
2444#ifdef __FreeBSD__
2445 callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
2446 callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
2447 pd);
2448#else
2449 timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
2450 timeout_add(&pd->pd_tmo, defer);
2451#endif
2452
2453 return (1);
2454}
2455
/*
 * Remove deferral pd from the list and either drop its packet (drop != 0)
 * or transmit it.  On FreeBSD the pf lock is released around ip_output()
 * because the output path may re-enter pf.
 */
void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;

	/* State is no longer waiting on a peer ack. */
	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo); /* bah */
	if (drop)
		m_freem(pd->pd_m);
	else {
		s = splnet();
#ifdef __FreeBSD__
		/* XXX: use pf_defered?! */
		PF_UNLOCK();
#endif
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	pool_put(&sc->sc_pool, pd);
}
2495
/*
 * Deferral timeout handler: the peer did not ack the state in time, so
 * release the deferred packet onto the wire via pfsync_undefer().
 */
void
pfsync_defer_tmo(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	/*
	 * pd is only named under VIMAGE — presumably because without
	 * VIMAGE the CURVNET_SET() below expands to nothing and the
	 * variable would be unused; confirm against the compat macros.
	 */
	struct pfsync_deferral *pd = arg;
#endif
	int s;

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
	PF_LOCK();
#endif
	pfsync_undefer(arg, 0);
#ifdef __FreeBSD__
	PF_UNLOCK();
	CURVNET_RESTORE();
#endif
	splx(s);
}
2516
2517void
2518pfsync_deferred(struct pf_state *st, int drop)
2519{
2520#ifdef __FreeBSD__
2521 struct pfsync_softc *sc = V_pfsyncif;
2522#else
2523 struct pfsync_softc *sc = pfsyncif;
2524#endif
2525 struct pfsync_deferral *pd;
2526
2527 TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
2528 if (pd->pd_st == st) {
2529 pfsync_undefer(pd, drop);
2530 return;
2531 }
2532 }
2533
2534 panic("pfsync_send_deferred: unable to find deferred state");
2535}
2536
/* Counter: number of immediate transmits triggered by state updates. */
u_int pfsync_upds = 0;

/*
 * Record a state change for sync to the peer.  A state already queued
 * keeps its queue and (for TCP) counts another update, forcing a send
 * once sc_maxupdates is reached; an unqueued state is put on the
 * compressed-update queue.  A send is also forced while the state was
 * synced recently (pfsync_time within the last 2 seconds — presumably
 * the last-sync timestamp).
 */
void
pfsync_update_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int sync = 0;

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	/* A deferred packet waiting on this state can go out now. */
	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	/* First data in the pending packet: arm the flush timeout. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */

		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
			st->sync_updates++;
			if (st->sync_updates >= sc->sc_maxupdates)
				sync = 1;
		}
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		break;

	default:
		panic("pfsync_update_state: unexpected sync state %d",
		    st->sync_state);
	}

	if (sync || (time_uptime - st->pfsync_time) < 2) {
		pfsync_upds++;
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
	}
}
2608
/*
 * Queue an update request (by state id/creatorid) to the peer — used
 * when we receive a compressed update for a state we do not know, and
 * for bulk-update requests (id == 0, creatorid == 0).  The pending
 * packet is flushed first if the request would overflow the MTU.
 */
void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);
	int s;

	PF_LOCK_ASSERT();

	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
	 */

	item = pool_get(&sc->sc_pool, PR_NOWAIT);
	if (item == NULL) {
		/* XXX stats */
		return;
	}

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	/* An empty request list means a subheader must be accounted too. */
	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);

		/* After the flush the list is empty again. */
		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	sc->sc_len += nlen;

#ifdef __FreeBSD__
	pfsync_sendout();
#else
	schednetisr(NETISR_PFSYNC);
#endif
}
2662
/*
 * Queue a full (uncompressed) state update in response to a peer's
 * update request.  Unlike pfsync_update_state() this always schedules a
 * transmit.  Panics if pfsync is not configured, since callers are only
 * reached from pfsync's own input/bulk paths.
 */
void
pfsync_update_state_req(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	PF_LOCK_ASSERT();

	if (sc == NULL)
		panic("pfsync_update_state_req: nonexistant instance");

	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD);
#ifdef __FreeBSD__
		pfsync_sendout();
#else
		schednetisr(NETISR_PFSYNC);
#endif
		return;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL:
		/* we're already handling it */
		return;

	default:
		panic("pfsync_update_state_req: unexpected sync state %d",
		    st->sync_state);
	}
}
2707
/*
 * Sync the removal of state st to the peer.  A state still on the
 * insert queue was never announced, so it is simply dequeued; anything
 * else moves to the delete queue.  Deferred packets for the state are
 * dropped (drop == 1) since the state is going away.
 */
void
pfsync_delete_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_LOCK_ASSERT();
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 1);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	/* First data in the pending packet: arm the flush timeout. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* we never got to tell the world so just forget about it */
		pfsync_q_del(st);
		return;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH to putting it on the del list */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL);
		return;

	default:
		panic("pfsync_delete_state: unexpected sync state %d",
		    st->sync_state);
	}
}
2763
2764void
2765pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2766{
2767 struct {
2768 struct pfsync_subheader subh;
2769 struct pfsync_clr clr;
2770 } __packed r;
2771
2772#ifdef __FreeBSD__
2773 struct pfsync_softc *sc = V_pfsyncif;
2774#else
2775 struct pfsync_softc *sc = pfsyncif;
2776#endif
2777
2778#ifdef __FreeBSD__
2779 PF_LOCK_ASSERT();
2780#else
2781 splassert(IPL_SOFTNET);
2782#endif
2783
2784 if (sc == NULL)
2785 return;
2786
2787 bzero(&r, sizeof(r));
2788
2789 r.subh.action = PFSYNC_ACT_CLR;
2790 r.subh.count = htons(1);
2791
2792 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2793 r.clr.creatorid = creatorid;
2794
2795 pfsync_send_plus(&r, sizeof(r));
2796}
2797
/*
 * Append state st to per-action queue q and grow sc_len accordingly
 * (plus a subheader when the queue was empty).  If the addition would
 * overflow the interface MTU the pending packet is flushed first.
 * The state must not already be on a queue.
 */
void
pfsync_q_ins(struct pf_state *st, int q)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = pfsync_qs[q].len;
	int s;

	PF_LOCK_ASSERT();

#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
	    ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif

	/* Sanity: sc_len must always include at least the fixed header. */
#if 1 || defined(PFSYNC_DEBUG)
	if (sc->sc_len < PFSYNC_MINPKT)
#ifdef __FreeBSD__
		panic("pfsync pkt len is too low %zu", sc->sc_len);
#else
		panic("pfsync pkt len is too low %d", sc->sc_len);
#endif
#endif
	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);

		/* Queue is empty after the flush: subheader needed again. */
		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	sc->sc_len += nlen;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
	st->sync_state = q;
}
2845
/*
 * Remove state st from its current sync queue and shrink sc_len,
 * dropping the subheader accounting when that queue becomes empty.
 * The state must be on a queue.
 */
void
pfsync_q_del(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int q = st->sync_state;

#ifdef __FreeBSD__
	KASSERT(st->sync_state != PFSYNC_S_NONE,
	    ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state != PFSYNC_S_NONE);
#endif

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}
2870
2871#ifdef notyet
/*
 * (Compiled out: enclosing #ifdef notyet.)  Queue a TDB (IPsec SA)
 * replay-counter update for sync, mirroring the state-queue accounting
 * above.  'output' selects whether the peer's replay value should be
 * bumped (TDBF_PFSYNC_RPL).  Once on the queue, further updates only
 * count until sc_maxupdates is reached, then a send is scheduled.
 */
void
pfsync_update_tdb(struct tdb *t, int output)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = sizeof(struct pfsync_tdb);
	int s;

	if (sc == NULL)
		return;

	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
		if (TAILQ_EMPTY(&sc->sc_tdb_q))
			nlen += sizeof(struct pfsync_subheader);

		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
			s = splnet();
			PF_LOCK();
			pfsync_sendout();
			PF_UNLOCK();
			splx(s);

			/* Queue empty after flush: subheader needed again. */
			nlen = sizeof(struct pfsync_subheader) +
			    sizeof(struct pfsync_tdb);
		}

		sc->sc_len += nlen;
		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
		SET(t->tdb_flags, TDBF_PFSYNC);
		t->tdb_updates = 0;
	} else {
		if (++t->tdb_updates >= sc->sc_maxupdates)
			schednetisr(NETISR_PFSYNC);
	}

	if (output)
		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
	else
		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
}
2915
/*
 * (Compiled out: enclosing #ifdef notyet.)  Remove a TDB from the sync
 * queue before it is destroyed, undoing pfsync_update_tdb()'s sc_len
 * accounting.
 */
void
pfsync_delete_tdb(struct tdb *t)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
		return;

	sc->sc_len -= sizeof(struct pfsync_tdb);
	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
	CLR(t->tdb_flags, TDBF_PFSYNC);

	if (TAILQ_EMPTY(&sc->sc_tdb_q))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}
2935
/*
 * (Compiled out: enclosing #ifdef notyet.)  Serialize one TDB replay
 * update into the outgoing mbuf at 'offset'.  Returns the number of
 * bytes written (sizeof(struct pfsync_tdb)).
 */
int
pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
{
	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);

	bzero(ut, sizeof(*ut));
	ut->spi = t->tdb_spi;
	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
	/*
	 * When a failover happens, the master's rpl is probably above
	 * what we see here (we may be up to a second late), so
	 * increase it a bit for outbound tdbs to manage most such
	 * situations.
	 *
	 * For now, just add an offset that is likely to be larger
	 * than the number of packets we can see in one second. The RFC
	 * just says the next packet must have a higher seq value.
	 *
	 * XXX What is a good algorithm for this? We could use
	 * a rate-determined increase, but to know it, we would have
	 * to extend struct tdb.
	 * XXX ut->rpl can wrap over MAXINT, but if so the real tdb
	 * will soon be replaced anyway. For now, just don't handle
	 * this edge case.
	 */
#define RPL_INCR 16384
	ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
	    RPL_INCR : 0));
	ut->cur_bytes = htobe64(t->tdb_cur_bytes);
	ut->sproto = t->tdb_sproto;

	return (sizeof(*ut));
}
2969#endif
2970
/*
 * Handle a peer's bulk update request: snapshot a starting point in the
 * global state list and kick off pfsync_bulk_update(), or immediately
 * report PFSYNC_BUS_END when there are no states to send.
 *
 * NOTE(review): the callout_reset() at the bottom has no OpenBSD
 * timeout_add() counterpart; this path looks FreeBSD-only — confirm.
 */
void
pfsync_bulk_start(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync: received bulk update request\n");

#ifdef __FreeBSD__
	PF_LOCK();
	if (TAILQ_EMPTY(&V_state_list))
#else
	if (TAILQ_EMPTY(&state_list))
#endif
		pfsync_bulk_status(PFSYNC_BUS_END);
	else {
		sc->sc_ureq_received = time_uptime;
		if (sc->sc_bulk_next == NULL)
#ifdef __FreeBSD__
			sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
#else
			sc->sc_bulk_next = TAILQ_FIRST(&state_list);
#endif
		/* sc_bulk_last marks where the circular walk must stop. */
		sc->sc_bulk_last = sc->sc_bulk_next;

		pfsync_bulk_status(PFSYNC_BUS_START);
		callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
}
3011
/*
 * Bulk update worker (callout): walk the global state list starting at
 * sc_bulk_next, queueing a full update for every state not already
 * queued, until either the packet fills up (reschedule ourselves) or
 * the walk wraps around to sc_bulk_last (send PFSYNC_BUS_END).
 *
 * NOTE(review): st is dereferenced before any NULL check; this assumes
 * sc_bulk_next is non-NULL on entry (set by pfsync_bulk_start) — confirm
 * nothing else can schedule this callout with sc_bulk_next == NULL.
 */
void
pfsync_bulk_update(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct pf_state *st = sc->sc_bulk_next;
	int i = 0;
	int s;

	PF_LOCK_ASSERT();

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif
	for (;;) {
		/* Skip states queued/expired/changed since the request. */
		if (st->sync_state == PFSYNC_S_NONE &&
		    st->timeout < PFTM_MAX &&
		    st->pfsync_time <= sc->sc_ureq_received) {
			pfsync_update_state_req(st);
			i++;
		}

		st = TAILQ_NEXT(st, entry_list);
		if (st == NULL)
#ifdef __FreeBSD__
			st = TAILQ_FIRST(&V_state_list);
#else
			st = TAILQ_FIRST(&state_list);
#endif

		if (st == sc->sc_bulk_last) {
			/* we're done */
			sc->sc_bulk_next = NULL;
			sc->sc_bulk_last = NULL;
			pfsync_bulk_status(PFSYNC_BUS_END);
			break;
		}

#ifdef __FreeBSD__
		if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
#else
		if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
#endif
		    sizeof(struct pfsync_state)) {
			/* we've filled a packet */
			sc->sc_bulk_next = st;
#ifdef __FreeBSD__
			callout_reset(&sc->sc_bulk_tmo, 1,
			    pfsync_bulk_update, sc);
#else
			timeout_add(&sc->sc_bulk_tmo, 1);
#endif
			break;
		}
	}

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
	splx(s);
}
3073
3074void
3075pfsync_bulk_status(u_int8_t status)
3076{
3077 struct {
3078 struct pfsync_subheader subh;
3079 struct pfsync_bus bus;
3080 } __packed r;
3081
3082#ifdef __FreeBSD__
3083 struct pfsync_softc *sc = V_pfsyncif;
3084#else
3085 struct pfsync_softc *sc = pfsyncif;
3086#endif
3087
3088 PF_LOCK_ASSERT();
3089
3090 bzero(&r, sizeof(r));
3091
3092 r.subh.action = PFSYNC_ACT_BUS;
3093 r.subh.count = htons(1);
3094
3095#ifdef __FreeBSD__
3096 r.bus.creatorid = V_pf_status.hostid;
3097#else
3098 r.bus.creatorid = pf_status.hostid;
3099#endif
3100 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
3101 r.bus.status = status;
3102
3103 pfsync_send_plus(&r, sizeof(r));
3104}
3105
/*
 * Bulk-failure callout: a requested bulk update did not complete in
 * time.  Retry up to PFSYNC_MAX_BULKTRIES by re-issuing the request;
 * after that, pretend the transfer succeeded and lift the carp
 * demotion so the box can take over anyway.
 */
void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
		/* id/creatorid 0/0 requests a full bulk update. */
		PF_LOCK();
		pfsync_request_update(0, 0);
		PF_UNLOCK();
	} else {
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
#ifdef __FreeBSD__
		if (!sc->pfsync_sync_ok && carp_demote_adj_p)
			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
			    "pfsync bulk fail");
		sc->pfsync_sync_ok = 1;
#else
#if NCARP > 0
		if (!pfsync_sync_ok)
			carp_group_demote_adj(&sc->sc_if, -1);
#endif
		pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: failed to receive bulk update\n");
	}

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}
3154
/*
 * Attach an out-of-band message blob (clear/bus messages) to the
 * pending packet and transmit immediately.  Flushes first if the blob
 * would overflow the MTU.  Only the pointer is stored in sc_plus, so
 * the blob must remain valid until pfsync_sendout() has consumed it
 * (it is consumed synchronously below).
 */
void
pfsync_send_plus(void *plus, size_t pluslen)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

	PF_LOCK_ASSERT();

#ifdef __FreeBSD__
	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);
	}

	sc->sc_plus = plus;
	sc->sc_len += (sc->sc_pluslen = pluslen);

	s = splnet();
	pfsync_sendout();
	splx(s);
}
3184
3185int
3186pfsync_up(void)
3187{
3188#ifdef __FreeBSD__
3189 struct pfsync_softc *sc = V_pfsyncif;
3190#else
3191 struct pfsync_softc *sc = pfsyncif;
3192#endif
3193
3194#ifdef __FreeBSD__
3195 if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
3196#else
3197 if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
3198#endif
3199 return (0);
3200
3201 return (1);
3202}
3203
3204int
3205pfsync_state_in_use(struct pf_state *st)
3206{
3207#ifdef __FreeBSD__
3208 struct pfsync_softc *sc = V_pfsyncif;
3209#else
3210 struct pfsync_softc *sc = pfsyncif;
3211#endif
3212
3213 if (sc == NULL)
3214 return (0);
3215
3216 if (st->sync_state != PFSYNC_S_NONE ||
3217 st == sc->sc_bulk_next ||
3218 st == sc->sc_bulk_last)
3219 return (1);
3220
3221 return (0);
3222}
3223
/* Diagnostics: count softint runs and flush-timeout firings. */
u_int pfsync_ints;
u_int pfsync_tmos;

/*
 * Flush timeout handler: push out whatever has accumulated in the
 * pending packet.  Armed whenever the first chunk of data is queued.
 */
void
pfsync_timeout(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	/*
	 * sc is only named under VIMAGE — presumably because CURVNET_SET()
	 * expands to nothing without it, leaving sc unused; confirm.
	 */
	struct pfsync_softc *sc = arg;
#endif
	int s;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	pfsync_tmos++;

	s = splnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}
3255
/*
 * Softnet/netisr handler.  On FreeBSD this is a swi handler that drains
 * the if_snd queue filled by pfsync_sendout() and ships each packet via
 * ip_output(); on OpenBSD the netisr simply calls pfsync_sendout(),
 * which transmits directly.
 */
void
#ifdef __FreeBSD__
pfsyncintr(void *arg)
{
	struct pfsync_softc *sc = arg;
	struct mbuf *m, *n;

	CURVNET_SET(sc->sc_ifp->if_vnet);
	pfsync_ints++;

	IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);

	for (; m != NULL; m = n) {

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
		    == 0)
			V_pfsyncstats.pfsyncs_opackets++;
		else
			V_pfsyncstats.pfsyncs_oerrors++;
	}
	CURVNET_RESTORE();
}
#else
pfsyncintr(void)
{
	int s;

	pfsync_ints++;

	s = splnet();
	pfsync_sendout();
	splx(s);
}
#endif
3293
/*
 * Protocol sysctl handler.  The stats node is compiled out (notyet);
 * currently everything returns ENOPROTOOPT.
 */
int
pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

#ifdef notyet
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case PFSYNCCTL_STATS:
		if (newp != NULL)
			return (EPERM);
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
	}
#endif
	return (ENOPROTOOPT);
}
3314
3315#ifdef __FreeBSD__
3345void
3346pfsync_ifdetach(void *arg, struct ifnet *ifp)
3316static int
3317pfsync_multicast_setup(struct pfsync_softc *sc)
3318{
3348 struct pfsync_softc *sc = (struct pfsync_softc *)arg;
3349 struct ip_moptions *imo;
3319 struct ip_moptions *imo = &sc->sc_imo;
3320 int error;
3321
3351 if (sc == NULL || sc->sc_sync_if != ifp)
3352 return; /* not for us; unlocked read */
3322 if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
3323 sc->sc_sync_if = NULL;
3324 return (EADDRNOTAVAIL);
3325 }
3326
3354 CURVNET_SET(sc->sc_ifp->if_vnet);
3327 imo->imo_membership = (struct in_multi **)malloc(
3328 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_PFSYNC,
3329 M_WAITOK | M_ZERO);
3330 imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
3331 imo->imo_multicast_vif = -1;
3332
3356 PF_LOCK();
3357
3358 /* Deal with a member interface going away from under us. */
3359 sc->sc_sync_if = NULL;
3360 imo = &sc->sc_imo;
3361 if (imo->imo_num_memberships > 0) {
3362 KASSERT(imo->imo_num_memberships == 1,
3363 ("%s: imo_num_memberships != 1", __func__));
3364 /*
3365 * Our event handler is always called after protocol
3366 * domains have been detached from the underlying ifnet.
3367 * Do not call in_delmulti(); we held a single reference
3368 * which the protocol domain has purged in in_purgemaddrs().
3369 */
3370 PF_UNLOCK();
3371 imo->imo_membership[--imo->imo_num_memberships] = NULL;
3372 PF_LOCK();
3373 imo->imo_multicast_ifp = NULL;
3333 if ((error = in_joingroup(sc->sc_sync_if, &sc->sc_sync_peer, NULL,
3334 &imo->imo_membership[0])) != 0) {
3335 free(imo->imo_membership, M_PFSYNC);
3336 return (error);
3337 }
3338 imo->imo_num_memberships++;
3339 imo->imo_multicast_ifp = sc->sc_sync_if;
3340 imo->imo_multicast_ttl = PFSYNC_DFLTTL;
3341 imo->imo_multicast_loop = 0;
3342
3376 PF_UNLOCK();
3377
3378 CURVNET_RESTORE();
3343 return (0);
3344}
3345
/*
 * Tear down the multicast membership created by pfsync_multicast_setup():
 * leave the group, free the membership array and clear the pointers.
 */
static void
pfsync_multicast_cleanup(struct pfsync_softc *sc)
{
	struct ip_moptions *imo = &sc->sc_imo;

	in_leavegroup(imo->imo_membership[0], NULL);
	free(imo->imo_membership, M_PFSYNC);
	imo->imo_membership = NULL;
	imo->imo_multicast_ifp = NULL;
}
3356
#ifdef INET
extern struct domain inetdomain;
/*
 * Raw-socket protocol switch entry so inbound IPPROTO_PFSYNC datagrams
 * are delivered to pfsync_input(); registered in pfsync_init().
 */
static struct protosw in_pfsync_protosw = {
	.pr_type = SOCK_RAW,
	.pr_domain = &inetdomain,
	.pr_protocol = IPPROTO_PFSYNC,
	.pr_flags = PR_ATOMIC|PR_ADDR,
	.pr_input = pfsync_input,
	.pr_output = (pr_output_t *)rip_output,
	.pr_ctloutput = rip_ctloutput,
	.pr_usrreqs = &rip_usrreqs
};
#endif
3370
3371static int
3382vnet_pfsync_init(const void *unused)
3372pfsync_init()
3373{
3374 VNET_ITERATOR_DECL(vnet_iter);
3375 int error = 0;
3376
3386 pfsyncattach(0);
3387
3388 error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
3389 SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie);
3377 VNET_LIST_RLOCK();
3378 VNET_FOREACH(vnet_iter) {
3379 CURVNET_SET(vnet_iter);
3380 V_pfsync_cloner = pfsync_cloner;
3381 V_pfsync_cloner_data = pfsync_cloner_data;
3382 V_pfsync_cloner.ifc_data = &V_pfsync_cloner_data;
3383 if_clone_attach(&V_pfsync_cloner);
3384 error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
3385 SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
3386 CURVNET_RESTORE();
3387 if (error)
3388 goto fail_locked;
3389 }
3390 VNET_LIST_RUNLOCK();
3391#ifdef INET
3392 error = pf_proto_register(PF_INET, &in_pfsync_protosw);
3393 if (error)
3391 panic("%s: swi_add %d", __func__, error);
3392
3394 goto fail;
3395 error = ipproto_register(IPPROTO_PFSYNC);
3396 if (error) {
3397 pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
3398 goto fail;
3399 }
3400#endif
3401 PF_LOCK();
3402 pfsync_state_import_ptr = pfsync_state_import;
3403 pfsync_up_ptr = pfsync_up;
3404 pfsync_insert_state_ptr = pfsync_insert_state;
3405 pfsync_update_state_ptr = pfsync_update_state;
3406 pfsync_delete_state_ptr = pfsync_delete_state;
3407 pfsync_clear_states_ptr = pfsync_clear_states;
3408 pfsync_state_in_use_ptr = pfsync_state_in_use;
3409 pfsync_defer_ptr = pfsync_defer;
3410 PF_UNLOCK();
3411
3412 return (0);
3413
3414fail:
3415 VNET_LIST_RLOCK();
3416fail_locked:
3417 VNET_FOREACH(vnet_iter) {
3418 CURVNET_SET(vnet_iter);
3419 if (V_pfsync_swi_cookie) {
3420 swi_remove(V_pfsync_swi_cookie);
3421 if_clone_detach(&V_pfsync_cloner);
3422 }
3423 CURVNET_RESTORE();
3424 }
3425 VNET_LIST_RUNLOCK();
3426
3427 return (error);
3428}
3429
3407static int
3408vnet_pfsync_uninit(const void *unused)
3430static void
3431pfsync_uninit()
3432{
3433 VNET_ITERATOR_DECL(vnet_iter);
3434
3411 swi_remove(pfsync_swi.pfsync_swi_cookie);
3412
3435 PF_LOCK();
3436 pfsync_state_import_ptr = NULL;
3437 pfsync_up_ptr = NULL;
3438 pfsync_insert_state_ptr = NULL;
3439 pfsync_update_state_ptr = NULL;
3440 pfsync_delete_state_ptr = NULL;
3441 pfsync_clear_states_ptr = NULL;
3442 pfsync_state_in_use_ptr = NULL;
3443 pfsync_defer_ptr = NULL;
3444 PF_UNLOCK();
3445
3424 if_clone_detach(&pfsync_cloner);
3425
3426 return (0);
3446 ipproto_unregister(IPPROTO_PFSYNC);
3447 pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
3448 VNET_LIST_RLOCK();
3449 VNET_FOREACH(vnet_iter) {
3450 CURVNET_SET(vnet_iter);
3451 swi_remove(V_pfsync_swi_cookie);
3452 if_clone_detach(&V_pfsync_cloner);
3453 CURVNET_RESTORE();
3454 }
3455 VNET_LIST_RUNLOCK();
3456}
3457
3429/* Define startup order. */
3430#define PFSYNC_SYSINIT_ORDER SI_SUB_PROTO_IF
3431#define PFSYNC_MODEVENT_ORDER (SI_ORDER_FIRST) /* On boot slot in here. */
3432#define PFSYNC_VNET_ORDER (PFSYNC_MODEVENT_ORDER + 2) /* Later still. */
3433
3434/*
3435 * Starting up.
3436 * VNET_SYSINIT is called for each existing vnet and each new vnet.
3437 */
3438VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
3439 vnet_pfsync_init, NULL);
3440
3441/*
3442 * Closing up shop. These are done in REVERSE ORDER,
3443 * Not called on reboot.
3444 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
3445 */
3446VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
3447 vnet_pfsync_uninit, NULL);
3458static int
3459pfsync_modevent(module_t mod, int type, void *data)
3460{
3461 int error = 0;
3462
3463 switch (type) {
3464 case MOD_LOAD:
3455#ifndef __FreeBSD__
3456 pfsyncattach(0);
3457#endif
3465 error = pfsync_init();
3466 break;
3467 case MOD_QUIESCE:
3468 /*
3469 * Module should not be unloaded due to race conditions.
3470 */
3471 error = EPERM;
3472 break;
3473 case MOD_UNLOAD:
3460#ifndef __FreeBSD__
3461 if_clone_detach(&pfsync_cloner);
3462#endif
3474 pfsync_uninit();
3475 break;
3476 default:
3477 error = EINVAL;
3478 break;
3479 }
3480
3469 return error;
3481 return (error);
3482}
3483
/* Kernel module glue for the pfsync pseudo-device. */
static moduledata_t pfsync_mod = {
	"pfsync",
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1
3491
3480DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3492DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
3493MODULE_VERSION(pfsync, PFSYNC_MODVER);
3494MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
3495#endif /* __FreeBSD__ */