if_pfsync.c: revision 228736 (deleted lines) vs. revision 228811 (added lines)
1/* $OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $ */
2
3/*
4 * Copyright (c) 2002 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45#ifdef __FreeBSD__
46#include "opt_inet.h"
47#include "opt_inet6.h"
48#include "opt_bpf.h"
49#include "opt_pf.h"
50
51#include <sys/cdefs.h>
48#include "opt_pf.h"
49
50#include <sys/cdefs.h>
52__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 228736 2011-12-20 13:53:31Z glebius $");
51__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 228811 2011-12-22 18:31:47Z glebius $");
53
52
54#ifdef DEV_BPF
55#define NBPFILTER DEV_BPF
56#else
57#define NBPFILTER 0
58#endif
53#define NBPFILTER 1
59
60#ifdef DEV_PFSYNC
61#define NPFSYNC DEV_PFSYNC
62#else
63#define NPFSYNC 0
64#endif
65#endif /* __FreeBSD__ */
66
67#include <sys/param.h>
68#include <sys/kernel.h>
69#ifdef __FreeBSD__
70#include <sys/bus.h>
71#include <sys/interrupt.h>
72#include <sys/priv.h>
73#endif
74#include <sys/proc.h>
75#include <sys/systm.h>
76#include <sys/time.h>
77#include <sys/mbuf.h>
78#include <sys/socket.h>
79#ifdef __FreeBSD__
80#include <sys/endian.h>
81#include <sys/malloc.h>
82#include <sys/module.h>
83#include <sys/sockio.h>
84#include <sys/taskqueue.h>
85#include <sys/lock.h>
86#include <sys/mutex.h>
87#else
88#include <sys/ioctl.h>
89#include <sys/timeout.h>
90#endif
91#include <sys/sysctl.h>
92#ifndef __FreeBSD__
93#include <sys/pool.h>
94#endif
95
96#include <net/if.h>
97#ifdef __FreeBSD__
98#include <net/if_clone.h>
99#endif
100#include <net/if_types.h>
101#include <net/route.h>
102#include <net/bpf.h>
103#include <net/netisr.h>
104#ifdef __FreeBSD__
105#include <net/vnet.h>
106#endif
107
108#include <netinet/in.h>
109#include <netinet/if_ether.h>
110#include <netinet/tcp.h>
111#include <netinet/tcp_seq.h>
112
113#ifdef INET
114#include <netinet/in_systm.h>
115#include <netinet/in_var.h>
116#include <netinet/ip.h>
117#include <netinet/ip_var.h>
118#endif
119
120#ifdef INET6
121#include <netinet6/nd6.h>
122#endif /* INET6 */
123
124#ifdef __FreeBSD__
125#include <netinet/ip_carp.h>
126#else
127#include "carp.h"
128#if NCARP > 0
129#include <netinet/ip_carp.h>
130#endif
131#endif
132
133#include <net/pfvar.h>
134#include <net/if_pfsync.h>
135
136#ifndef __FreeBSD__
137#include "bpfilter.h"
138#include "pfsync.h"
139#endif
140
141#define PFSYNC_MINPKT ( \
142 sizeof(struct ip) + \
143 sizeof(struct pfsync_header) + \
144 sizeof(struct pfsync_subheader) + \
145 sizeof(struct pfsync_eof))
146
147struct pfsync_pkt {
148 struct ip *ip;
149 struct in_addr src;
150 u_int8_t flags;
151};
152
153int pfsync_input_hmac(struct mbuf *, int);
154
155int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
156 struct pfsync_state_peer *);
157
158int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
159int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
160int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
161int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
162int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
163int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
164int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
165int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
166int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
167int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
168int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
169
170int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);
171
172int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
173 pfsync_in_clr, /* PFSYNC_ACT_CLR */
174 pfsync_in_ins, /* PFSYNC_ACT_INS */
175 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */
176 pfsync_in_upd, /* PFSYNC_ACT_UPD */
177 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */
178 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */
179 pfsync_in_del, /* PFSYNC_ACT_DEL */
180 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */
181 pfsync_in_error, /* PFSYNC_ACT_INS_F */
182 pfsync_in_error, /* PFSYNC_ACT_DEL_F */
183 pfsync_in_bus, /* PFSYNC_ACT_BUS */
184 pfsync_in_tdb, /* PFSYNC_ACT_TDB */
185 pfsync_in_eof /* PFSYNC_ACT_EOF */
186};
187
188struct pfsync_q {
189 int (*write)(struct pf_state *, struct mbuf *, int);
190 size_t len;
191 u_int8_t action;
192};
193
194/* we have one of these for every PFSYNC_S_ */
195int pfsync_out_state(struct pf_state *, struct mbuf *, int);
196int pfsync_out_iack(struct pf_state *, struct mbuf *, int);
197int pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
198int pfsync_out_del(struct pf_state *, struct mbuf *, int);
199
200struct pfsync_q pfsync_qs[] = {
201 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS },
202 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
203 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD },
204 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
205 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C }
206};
207
208void pfsync_q_ins(struct pf_state *, int);
209void pfsync_q_del(struct pf_state *);
210
211struct pfsync_upd_req_item {
212 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry;
213 struct pfsync_upd_req ur_msg;
214};
215TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);
216
217struct pfsync_deferral {
218 TAILQ_ENTRY(pfsync_deferral) pd_entry;
219 struct pf_state *pd_st;
220 struct mbuf *pd_m;
221#ifdef __FreeBSD__
222 struct callout pd_tmo;
223#else
224 struct timeout pd_tmo;
225#endif
226};
227TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);
228
229#define PFSYNC_PLSIZE MAX(sizeof(struct pfsync_upd_req_item), \
230 sizeof(struct pfsync_deferral))
231
232#ifdef notyet
233int pfsync_out_tdb(struct tdb *, struct mbuf *, int);
234#endif
235
236struct pfsync_softc {
237#ifdef __FreeBSD__
238 struct ifnet *sc_ifp;
239#else
240 struct ifnet sc_if;
241#endif
242 struct ifnet *sc_sync_if;
243
244#ifdef __FreeBSD__
245 uma_zone_t sc_pool;
246#else
247 struct pool sc_pool;
248#endif
249
250 struct ip_moptions sc_imo;
251
252 struct in_addr sc_sync_peer;
253 u_int8_t sc_maxupdates;
254#ifdef __FreeBSD__
255 int pfsync_sync_ok;
256#endif
257
258 struct ip sc_template;
259
260 struct pf_state_queue sc_qs[PFSYNC_S_COUNT];
261 size_t sc_len;
262
263 struct pfsync_upd_reqs sc_upd_req_list;
264
265 struct pfsync_deferrals sc_deferrals;
266 u_int sc_deferred;
267
268 void *sc_plus;
269 size_t sc_pluslen;
270
271 u_int32_t sc_ureq_sent;
272 int sc_bulk_tries;
273#ifdef __FreeBSD__
274 struct callout sc_bulkfail_tmo;
275#else
276 struct timeout sc_bulkfail_tmo;
277#endif
278
279 u_int32_t sc_ureq_received;
280 struct pf_state *sc_bulk_next;
281 struct pf_state *sc_bulk_last;
282#ifdef __FreeBSD__
283 struct callout sc_bulk_tmo;
284#else
285 struct timeout sc_bulk_tmo;
286#endif
287
288 TAILQ_HEAD(, tdb) sc_tdb_q;
289
290#ifdef __FreeBSD__
291 struct callout sc_tmo;
292#else
293 struct timeout sc_tmo;
294#endif
295#ifdef __FreeBSD__
296 eventhandler_tag sc_detachtag;
297#endif
298
299};
300
301#ifdef __FreeBSD__
302static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
303#define V_pfsyncif VNET(pfsyncif)
304
305static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
306#define V_pfsyncstats VNET(pfsyncstats)
307static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
308#define V_pfsync_carp_adj VNET(pfsync_carp_adj)
309
310SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
311SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
312 &VNET_NAME(pfsyncstats), pfsyncstats,
313 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
314SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
315 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
316#else
317struct pfsync_softc *pfsyncif = NULL;
318struct pfsyncstats pfsyncstats;
319#define V_pfsyncstats pfsyncstats
320#endif
321
322#ifdef __FreeBSD__
323static void pfsyncintr(void *);
324struct pfsync_swi {
325 void * pfsync_swi_cookie;
326};
327static struct pfsync_swi pfsync_swi;
328#define schednetisr(p) swi_sched(pfsync_swi.pfsync_swi_cookie, 0)
329#define NETISR_PFSYNC
330#endif
331
332void pfsyncattach(int);
333#ifdef __FreeBSD__
334int pfsync_clone_create(struct if_clone *, int, caddr_t);
335void pfsync_clone_destroy(struct ifnet *);
336#else
337int pfsync_clone_create(struct if_clone *, int);
338int pfsync_clone_destroy(struct ifnet *);
339#endif
340int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
341 struct pf_state_peer *);
342void pfsync_update_net_tdb(struct pfsync_tdb *);
343int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
344#ifdef __FreeBSD__
345 struct route *);
346#else
347 struct rtentry *);
348#endif
349int pfsyncioctl(struct ifnet *, u_long, caddr_t);
350void pfsyncstart(struct ifnet *);
351
352struct mbuf *pfsync_if_dequeue(struct ifnet *);
353struct mbuf *pfsync_get_mbuf(struct pfsync_softc *);
354
355void pfsync_deferred(struct pf_state *, int);
356void pfsync_undefer(struct pfsync_deferral *, int);
357void pfsync_defer_tmo(void *);
358
359void pfsync_request_update(u_int32_t, u_int64_t);
360void pfsync_update_state_req(struct pf_state *);
361
362void pfsync_drop(struct pfsync_softc *);
363void pfsync_sendout(void);
364void pfsync_send_plus(void *, size_t);
365int pfsync_tdb_sendout(struct pfsync_softc *);
366int pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
367void pfsync_timeout(void *);
368void pfsync_tdb_timeout(void *);
369void pfsync_send_bus(struct pfsync_softc *, u_int8_t);
370
371void pfsync_bulk_start(void);
372void pfsync_bulk_status(u_int8_t);
373void pfsync_bulk_update(void *);
374void pfsync_bulk_fail(void *);
375
376#ifdef __FreeBSD__
377void pfsync_ifdetach(void *, struct ifnet *);
378
379/* XXX: ugly */
380#define betoh64 (unsigned long long)be64toh
381#define timeout_del callout_stop
382#endif
383
384#define PFSYNC_MAX_BULKTRIES 12
385#ifndef __FreeBSD__
386int pfsync_sync_ok;
387#endif
388
389#ifdef __FreeBSD__
390IFC_SIMPLE_DECLARE(pfsync, 1);
391#else
392struct if_clone pfsync_cloner =
393 IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
394#endif
395
396void
397pfsyncattach(int npfsync)
398{
399 if_clone_attach(&pfsync_cloner);
400}
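/*
 * Create the pfsync interface (unit 0 only): allocate the softc, the
 * per-type queues and the deferral/request pool, set up the ifnet,
 * callouts and bpf attachment.
 */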
401int
402#ifdef __FreeBSD__
403pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
404#else
405pfsync_clone_create(struct if_clone *ifc, int unit)
406#endif
407{
408 struct pfsync_softc *sc;
409 struct ifnet *ifp;
410 int q;
411
412 if (unit != 0)
413 return (EINVAL);
414
415#ifndef __FreeBSD__
416 pfsync_sync_ok = 1;
417#endif
418
419 sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
420 if (sc == NULL)
421 return (ENOMEM);
422
423 for (q = 0; q < PFSYNC_S_COUNT; q++)
424 TAILQ_INIT(&sc->sc_qs[q]);
425
426#ifdef __FreeBSD__
427 sc->pfsync_sync_ok = 1;
428 sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE,
429 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
430 if (sc->sc_pool == NULL) {
431 free(sc, M_DEVBUF);
432 return (ENOMEM);
433 }
434#else
435 pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
436#endif
437 TAILQ_INIT(&sc->sc_upd_req_list);
438 TAILQ_INIT(&sc->sc_deferrals);
439 sc->sc_deferred = 0;
440
441 TAILQ_INIT(&sc->sc_tdb_q);
442
443 sc->sc_len = PFSYNC_MINPKT;
444 sc->sc_maxupdates = 128;
445
446#ifdef __FreeBSD__
447 sc->sc_imo.imo_membership = (struct in_multi **)malloc(
448 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF,
449 M_NOWAIT | M_ZERO);
450 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
451 sc->sc_imo.imo_multicast_vif = -1;
452#else
453 sc->sc_imo.imo_membership = (struct in_multi **)malloc(
454 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
455 M_WAITOK | M_ZERO);
456 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
457#endif
458
459#ifdef __FreeBSD__
460 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
461 if (ifp == NULL) {
462 free(sc->sc_imo.imo_membership, M_DEVBUF);
463 uma_zdestroy(sc->sc_pool);
464 free(sc, M_DEVBUF);
465 return (ENOSPC);
466 }
467 if_initname(ifp, ifc->ifc_name, unit);
468
469 sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event,
470#ifdef __FreeBSD__
471 pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY);
472#else
473 pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY);
474#endif
475 if (sc->sc_detachtag == NULL) {
476 if_free(ifp);
477 free(sc->sc_imo.imo_membership, M_DEVBUF);
478 uma_zdestroy(sc->sc_pool);
479 free(sc, M_DEVBUF);
480 return (ENOSPC);
481 }
482#else
483 ifp = &sc->sc_if;
484 snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
485#endif
486 ifp->if_softc = sc;
487 ifp->if_ioctl = pfsyncioctl;
488 ifp->if_output = pfsyncoutput;
489 ifp->if_start = pfsyncstart;
490 ifp->if_type = IFT_PFSYNC;
491 ifp->if_snd.ifq_maxlen = ifqmaxlen;
492 ifp->if_hdrlen = sizeof(struct pfsync_header);
493 ifp->if_mtu = 1500; /* XXX */
494#ifdef __FreeBSD__
495 callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
496 callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
497 callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
498#else
499 ifp->if_hardmtu = MCLBYTES; /* XXX */
500 timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
501 timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
502 timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
503#endif
504
505 if_attach(ifp);
506#ifndef __FreeBSD__
507 if_alloc_sadl(ifp);
508
509#if NCARP > 0
510 if_addgroup(ifp, "carp");
511#endif
512#endif
513
514#if NBPFILTER > 0
515#ifdef __FreeBSD__
516 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
517#else
518 bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
519#endif
520#endif
521
522#ifdef __FreeBSD__
523 V_pfsyncif = sc;
524#else
525 pfsyncif = sc;
526#endif
527
528 return (0);
529}
530
531#ifdef __FreeBSD__
532void
533#else
534int
535#endif
536pfsync_clone_destroy(struct ifnet *ifp)
537{
538 struct pfsync_softc *sc = ifp->if_softc;
539
540#ifdef __FreeBSD__
541 EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag);
542 PF_LOCK();
543#endif
544 timeout_del(&sc->sc_bulk_tmo);
545 timeout_del(&sc->sc_tmo);
546#ifdef __FreeBSD__
547 PF_UNLOCK();
548 if (!sc->pfsync_sync_ok && carp_demote_adj_p)
549 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
550#else
551#if NCARP > 0
552 if (!pfsync_sync_ok)
553 carp_group_demote_adj(&sc->sc_if, -1);
554#endif
555#endif
556#if NBPFILTER > 0
557 bpfdetach(ifp);
558#endif
559 if_detach(ifp);
560
561 pfsync_drop(sc);
562
563 while (sc->sc_deferred > 0)
564 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
565
566#ifdef __FreeBSD__
567 UMA_DESTROY(sc->sc_pool);
568#else
569 pool_destroy(&sc->sc_pool);
570#endif
571#ifdef __FreeBSD__
572 if_free(ifp);
573 free(sc->sc_imo.imo_membership, M_DEVBUF);
574#else
575 free(sc->sc_imo.imo_membership, M_IPMOPTS);
576#endif
577 free(sc, M_DEVBUF);
578
579#ifdef __FreeBSD__
580 V_pfsyncif = NULL;
581#else
582 pfsyncif = NULL;
583#endif
584
585#ifndef __FreeBSD__
586 return (0);
587#endif
588}
589
590struct mbuf *
591pfsync_if_dequeue(struct ifnet *ifp)
592{
593 struct mbuf *m;
594#ifndef __FreeBSD__
595 int s;
596#endif
597
598#ifdef __FreeBSD__
599 IF_LOCK(&ifp->if_snd);
600 _IF_DROP(&ifp->if_snd);
601 _IF_DEQUEUE(&ifp->if_snd, m);
602 IF_UNLOCK(&ifp->if_snd);
603#else
604 s = splnet();
605 IF_DEQUEUE(&ifp->if_snd, m);
606 splx(s);
607#endif
608
609 return (m);
610}
611
612/*
613 * Start output on the pfsync interface.
614 */
615void
616pfsyncstart(struct ifnet *ifp)
617{
618 struct mbuf *m;
619
620 while ((m = pfsync_if_dequeue(ifp)) != NULL) {
621#ifndef __FreeBSD__
622 IF_DROP(&ifp->if_snd);
623#endif
624 m_freem(m);
625 }
626}
627
628int
629pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
630 struct pf_state_peer *d)
631{
632 if (s->scrub.scrub_flag && d->scrub == NULL) {
633#ifdef __FreeBSD__
634 d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
635#else
636 d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
637#endif
638 if (d->scrub == NULL)
639 return (ENOMEM);
640 }
641
642 return (0);
643}
644
645#ifndef __FreeBSD__
646void
647pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
648{
649 bzero(sp, sizeof(struct pfsync_state));
650
651 /* copy from state key */
652 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
653 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
654 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
655 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
656 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
657 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
658 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
659 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
660 sp->proto = st->key[PF_SK_WIRE]->proto;
661 sp->af = st->key[PF_SK_WIRE]->af;
662
663 /* copy from state */
664 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
665 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
666 sp->creation = htonl(time_second - st->creation);
667 sp->expire = pf_state_expires(st);
668 if (sp->expire <= time_second)
669 sp->expire = htonl(0);
670 else
671 sp->expire = htonl(sp->expire - time_second);
672
673 sp->direction = st->direction;
674 sp->log = st->log;
675 sp->timeout = st->timeout;
676 sp->state_flags = st->state_flags;
677 if (st->src_node)
678 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
679 if (st->nat_src_node)
680 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
681
682 bcopy(&st->id, &sp->id, sizeof(sp->id));
683 sp->creatorid = st->creatorid;
684 pf_state_peer_hton(&st->src, &sp->src);
685 pf_state_peer_hton(&st->dst, &sp->dst);
686
687 if (st->rule.ptr == NULL)
688 sp->rule = htonl(-1);
689 else
690 sp->rule = htonl(st->rule.ptr->nr);
691 if (st->anchor.ptr == NULL)
692 sp->anchor = htonl(-1);
693 else
694 sp->anchor = htonl(st->anchor.ptr->nr);
695 if (st->nat_rule.ptr == NULL)
696 sp->nat_rule = htonl(-1);
697 else
698 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
699
700 pf_state_counter_hton(st->packets[0], sp->packets[0]);
701 pf_state_counter_hton(st->packets[1], sp->packets[1]);
702 pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
703 pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
704
705}
706#endif
707
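/*
 * Import a wire-format pfsync state into the local state table: look up
 * the interface and owning rule, allocate the state and its key(s), copy
 * the peer and timeout data, and insert it with pf_state_insert().
 */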
708int
709pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
710{
711 struct pf_state *st = NULL;
712 struct pf_state_key *skw = NULL, *sks = NULL;
713 struct pf_rule *r = NULL;
714 struct pfi_kif *kif;
715 int pool_flags;
716 int error;
717
718 PF_LOCK_ASSERT();
719
720#ifdef __FreeBSD__
721 if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
722#else
723 if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
724#endif
725 printf("pfsync_state_import: invalid creator id:"
726 " %08x\n", ntohl(sp->creatorid));
727 return (EINVAL);
728 }
729
730 if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
731#ifdef __FreeBSD__
732 if (V_pf_status.debug >= PF_DEBUG_MISC)
733#else
734 if (pf_status.debug >= PF_DEBUG_MISC)
735#endif
736 printf("pfsync_state_import: "
737 "unknown interface: %s\n", sp->ifname);
738 if (flags & PFSYNC_SI_IOCTL)
739 return (EINVAL);
740 return (0); /* skip this state */
741 }
742
743 /*
744 * If the ruleset checksums match or the state is coming from the ioctl,
745 * it's safe to associate the state with the rule of that number.
746 */
747 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
748 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
749 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
750 r = pf_main_ruleset.rules[
751 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
752 else
753#ifdef __FreeBSD__
754 r = &V_pf_default_rule;
755#else
756 r = &pf_default_rule;
757#endif
758
759 if ((r->max_states && r->states_cur >= r->max_states))
760 goto cleanup;
761
762#ifdef __FreeBSD__
763 if (flags & PFSYNC_SI_IOCTL)
764 pool_flags = PR_WAITOK | PR_ZERO;
765 else
766 pool_flags = PR_NOWAIT | PR_ZERO;
767
768 if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)
769 goto cleanup;
770#else
771 if (flags & PFSYNC_SI_IOCTL)
772 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
773 else
774 pool_flags = PR_LIMITFAIL | PR_ZERO;
775
776 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
777 goto cleanup;
778#endif
779
780 if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
781 goto cleanup;
782
783 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
784 &sp->key[PF_SK_STACK].addr[0], sp->af) ||
785 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
786 &sp->key[PF_SK_STACK].addr[1], sp->af) ||
787 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
788 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
789 if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
790 goto cleanup;
791 } else
792 sks = skw;
793
794 /* allocate memory for scrub info */
795 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
796 pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
797 goto cleanup;
798
799 /* copy to state key(s) */
800 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
801 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
802 skw->port[0] = sp->key[PF_SK_WIRE].port[0];
803 skw->port[1] = sp->key[PF_SK_WIRE].port[1];
804 skw->proto = sp->proto;
805 skw->af = sp->af;
806 if (sks != skw) {
807 sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
808 sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
809 sks->port[0] = sp->key[PF_SK_STACK].port[0];
810 sks->port[1] = sp->key[PF_SK_STACK].port[1];
811 sks->proto = sp->proto;
812 sks->af = sp->af;
813 }
814
815 /* copy to state */
816 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
817 st->creation = time_second - ntohl(sp->creation);
818 st->expire = time_second;
819 if (sp->expire) {
820 /* XXX No adaptive scaling. */
821 st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
822 }
823
824 st->expire = ntohl(sp->expire) + time_second;
825 st->direction = sp->direction;
826 st->log = sp->log;
827 st->timeout = sp->timeout;
828 st->state_flags = sp->state_flags;
829
830 bcopy(sp->id, &st->id, sizeof(st->id));
831 st->creatorid = sp->creatorid;
832 pf_state_peer_ntoh(&sp->src, &st->src);
833 pf_state_peer_ntoh(&sp->dst, &st->dst);
834
835 st->rule.ptr = r;
836 st->nat_rule.ptr = NULL;
837 st->anchor.ptr = NULL;
838 st->rt_kif = NULL;
839
840 st->pfsync_time = time_second;
841 st->sync_state = PFSYNC_S_NONE;
842
843 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
844 r->states_cur++;
845 r->states_tot++;
846
847 if (!ISSET(flags, PFSYNC_SI_IOCTL))
848 SET(st->state_flags, PFSTATE_NOSYNC);
849
850 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
851 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
852 r->states_cur--;
853 goto cleanup_state;
854 }
855
856 if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
857 CLR(st->state_flags, PFSTATE_NOSYNC);
858 if (ISSET(st->state_flags, PFSTATE_ACK)) {
859 pfsync_q_ins(st, PFSYNC_S_IACK);
860#ifdef __FreeBSD__
861 pfsync_sendout();
862#else
863 schednetisr(NETISR_PFSYNC);
864#endif
865 }
866 }
867 CLR(st->state_flags, PFSTATE_ACK);
868
869 return (0);
870
871cleanup:
872 error = ENOMEM;
873 if (skw == sks)
874 sks = NULL;
875#ifdef __FreeBSD__
876 if (skw != NULL)
877 pool_put(&V_pf_state_key_pl, skw);
878 if (sks != NULL)
879 pool_put(&V_pf_state_key_pl, sks);
880#else
881 if (skw != NULL)
882 pool_put(&pf_state_key_pl, skw);
883 if (sks != NULL)
884 pool_put(&pf_state_key_pl, sks);
885#endif
886
887cleanup_state: /* pf_state_insert frees the state keys */
888 if (st) {
889#ifdef __FreeBSD__
890 if (st->dst.scrub)
891 pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
892 if (st->src.scrub)
893 pool_put(&V_pf_state_scrub_pl, st->src.scrub);
894 pool_put(&V_pf_state_pl, st);
895#else
896 if (st->dst.scrub)
897 pool_put(&pf_state_scrub_pl, st->dst.scrub);
898 if (st->src.scrub)
899 pool_put(&pf_state_scrub_pl, st->src.scrub);
900 pool_put(&pf_state_pl, st);
901#endif
902 }
903 return (error);
904}
905
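/*
 * Input handler for IPPROTO_PFSYNC packets: check that they arrived on
 * the configured sync interface with TTL 255 and a matching protocol
 * version, then dispatch each subheader to its handler in pfsync_acts[].
 */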
906void
907#ifdef __FreeBSD__
908pfsync_input(struct mbuf *m, __unused int off)
909#else
910pfsync_input(struct mbuf *m, ...)
911#endif
912{
913#ifdef __FreeBSD__
914 struct pfsync_softc *sc = V_pfsyncif;
915#else
916 struct pfsync_softc *sc = pfsyncif;
917#endif
918 struct pfsync_pkt pkt;
919 struct ip *ip = mtod(m, struct ip *);
920 struct pfsync_header *ph;
921 struct pfsync_subheader subh;
922
923 int offset;
924 int rv;
925
926 V_pfsyncstats.pfsyncs_ipackets++;
927
928 /* verify that we have a sync interface configured */
929#ifdef __FreeBSD__
930 if (!sc || !sc->sc_sync_if || !V_pf_status.running)
931#else
932 if (!sc || !sc->sc_sync_if || !pf_status.running)
933#endif
934 goto done;
935
936 /* verify that the packet came in on the right interface */
937 if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
938 V_pfsyncstats.pfsyncs_badif++;
939 goto done;
940 }
941
942#ifdef __FreeBSD__
943 sc->sc_ifp->if_ipackets++;
944 sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
945#else
946 sc->sc_if.if_ipackets++;
947 sc->sc_if.if_ibytes += m->m_pkthdr.len;
948#endif
949 /* verify that the IP TTL is 255. */
950 if (ip->ip_ttl != PFSYNC_DFLTTL) {
951 V_pfsyncstats.pfsyncs_badttl++;
952 goto done;
953 }
954
955 offset = ip->ip_hl << 2;
956 if (m->m_pkthdr.len < offset + sizeof(*ph)) {
957 V_pfsyncstats.pfsyncs_hdrops++;
958 goto done;
959 }
960
961 if (offset + sizeof(*ph) > m->m_len) {
962 if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
963 V_pfsyncstats.pfsyncs_hdrops++;
964 return;
965 }
966 ip = mtod(m, struct ip *);
967 }
968 ph = (struct pfsync_header *)((char *)ip + offset);
969
970 /* verify the version */
971 if (ph->version != PFSYNC_VERSION) {
972 V_pfsyncstats.pfsyncs_badver++;
973 goto done;
974 }
975
976#if 0
977 if (pfsync_input_hmac(m, offset) != 0) {
978 /* XXX stats */
979 goto done;
980 }
981#endif
982
983 /* Cheaper to grab this now than having to mess with mbufs later */
984 pkt.ip = ip;
985 pkt.src = ip->ip_src;
986 pkt.flags = 0;
987
988#ifdef __FreeBSD__
989 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
990#else
991 if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
992#endif
993 pkt.flags |= PFSYNC_SI_CKSUM;
994
995 offset += sizeof(*ph);
996 for (;;) {
997 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
998 offset += sizeof(subh);
999
1000 if (subh.action >= PFSYNC_ACT_MAX) {
1001 V_pfsyncstats.pfsyncs_badact++;
1002 goto done;
1003 }
1004
1005 rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
1006 ntohs(subh.count));
1007 if (rv == -1)
1008 return;
1009
1010 offset += rv;
1011 }
1012
1013done:
1014 m_freem(m);
1015}
1016
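/*
 * PFSYNC_ACT_CLR: clear all states matching the given creator id,
 * optionally restricted to a named interface.
 */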
1017int
1018pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1019{
1020 struct pfsync_clr *clr;
1021 struct mbuf *mp;
1022 int len = sizeof(*clr) * count;
1023 int i, offp;
1024
1025 struct pf_state *st, *nexts;
1026 struct pf_state_key *sk, *nextsk;
1027 struct pf_state_item *si;
1028 u_int32_t creatorid;
1029 int s;
1030
1031 mp = m_pulldown(m, offset, len, &offp);
1032 if (mp == NULL) {
1033 V_pfsyncstats.pfsyncs_badlen++;
1034 return (-1);
1035 }
1036 clr = (struct pfsync_clr *)(mp->m_data + offp);
1037
1038 s = splsoftnet();
1039#ifdef __FreeBSD__
1040 PF_LOCK();
1041#endif
1042 for (i = 0; i < count; i++) {
1043 creatorid = clr[i].creatorid;
1044
1045 if (clr[i].ifname[0] == '\0') {
1046#ifdef __FreeBSD__
1047 for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
1048 st; st = nexts) {
1049 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
1050#else
1051 for (st = RB_MIN(pf_state_tree_id, &tree_id);
1052 st; st = nexts) {
1053 nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
1054#endif
1055 if (st->creatorid == creatorid) {
1056 SET(st->state_flags, PFSTATE_NOSYNC);
1057 pf_unlink_state(st);
1058 }
1059 }
1060 } else {
1061 if (pfi_kif_get(clr[i].ifname) == NULL)
1062 continue;
1063
1064 /* XXX correct? */
1065#ifdef __FreeBSD__
1066 for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
1067#else
1068 for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
1069#endif
1070 sk; sk = nextsk) {
1071 nextsk = RB_NEXT(pf_state_tree,
1072#ifdef __FreeBSD__
1073 &V_pf_statetbl, sk);
1074#else
1075 &pf_statetbl, sk);
1076#endif
1077 TAILQ_FOREACH(si, &sk->states, entry) {
1078 if (si->s->creatorid == creatorid) {
1079 SET(si->s->state_flags,
1080 PFSTATE_NOSYNC);
1081 pf_unlink_state(si->s);
1082 }
1083 }
1084 }
1085 }
1086 }
1087#ifdef __FreeBSD__
1088 PF_UNLOCK();
1089#endif
1090 splx(s);
1091
1092 return (len);
1093}
1094
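/*
 * PFSYNC_ACT_INS: sanity check each advertised state and import it via
 * pfsync_state_import().
 */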
1095int
1096pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1097{
1098 struct mbuf *mp;
1099 struct pfsync_state *sa, *sp;
1100 int len = sizeof(*sp) * count;
1101 int i, offp;
1102
1103 int s;
1104
1105 mp = m_pulldown(m, offset, len, &offp);
1106 if (mp == NULL) {
1107 V_pfsyncstats.pfsyncs_badlen++;
1108 return (-1);
1109 }
1110 sa = (struct pfsync_state *)(mp->m_data + offp);
1111
1112 s = splsoftnet();
1113#ifdef __FreeBSD__
1114 PF_LOCK();
1115#endif
1116 for (i = 0; i < count; i++) {
1117 sp = &sa[i];
1118
1119 /* check for invalid values */
1120 if (sp->timeout >= PFTM_MAX ||
1121 sp->src.state > PF_TCPS_PROXY_DST ||
1122 sp->dst.state > PF_TCPS_PROXY_DST ||
1123 sp->direction > PF_OUT ||
1124 (sp->af != AF_INET && sp->af != AF_INET6)) {
1125#ifdef __FreeBSD__
1126 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1127#else
1128 if (pf_status.debug >= PF_DEBUG_MISC) {
1129#endif
1130 printf("pfsync_input: PFSYNC5_ACT_INS: "
1131 "invalid value\n");
1132 }
1133 V_pfsyncstats.pfsyncs_badval++;
1134 continue;
1135 }
1136
1137 if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
1138 /* drop out, but process the rest of the actions */
1139 break;
1140 }
1141 }
1142#ifdef __FreeBSD__
1143 PF_UNLOCK();
1144#endif
1145 splx(s);
1146
1147 return (len);
1148}
1149
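/*
 * PFSYNC_ACT_INS_ACK: an insert was acknowledged by the peer, so any
 * deferred packet held for that state can be released.
 */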
1150int
1151pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1152{
1153 struct pfsync_ins_ack *ia, *iaa;
1154 struct pf_state_cmp id_key;
1155 struct pf_state *st;
1156
1157 struct mbuf *mp;
1158 int len = count * sizeof(*ia);
1159 int offp, i;
1160 int s;
1161
1162 mp = m_pulldown(m, offset, len, &offp);
1163 if (mp == NULL) {
1164 V_pfsyncstats.pfsyncs_badlen++;
1165 return (-1);
1166 }
1167 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);
1168
1169 s = splsoftnet();
1170#ifdef __FreeBSD__
1171 PF_LOCK();
1172#endif
1173 for (i = 0; i < count; i++) {
1174 ia = &iaa[i];
1175
1176 bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
1177 id_key.creatorid = ia->creatorid;
1178
1179 st = pf_find_state_byid(&id_key);
1180 if (st == NULL)
1181 continue;
1182
1183 if (ISSET(st->state_flags, PFSTATE_ACK))
1184 pfsync_deferred(st, 0);
1185 }
1186#ifdef __FreeBSD__
1187 PF_UNLOCK();
1188#endif
1189 splx(s);
1190 /*
1191 * XXX this is not yet implemented, but we know the size of the
1192 * message so we can skip it.
1193 */
1194
1195 return (count * sizeof(struct pfsync_ins_ack));
1196}
1197
1198int
1199pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
1200 struct pfsync_state_peer *dst)
1201{
1202 int sfail = 0;
1203
1204 /*
1205 * The state should never go backwards except
1206 * for syn-proxy states. Neither should the
1207 * sequence window slide backwards.
1208 */
1209 if (st->src.state > src->state &&
1210 (st->src.state < PF_TCPS_PROXY_SRC ||
1211 src->state >= PF_TCPS_PROXY_SRC))
1212 sfail = 1;
1213 else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
1214 sfail = 3;
1215 else if (st->dst.state > dst->state) {
1216 /* There might still be useful
1217 * information about the src state here,
1218 * so import that part of the update,
1219 * then "fail" so we send the updated
1220 * state back to the peer who is missing
 1221 * what we know. */
1222 pf_state_peer_ntoh(src, &st->src);
1223 /* XXX do anything with timeouts? */
1224 sfail = 7;
1225 } else if (st->dst.state >= TCPS_SYN_SENT &&
1226 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
1227 sfail = 4;
1228
1229 return (sfail);
1230}
1231
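/*
 * PFSYNC_ACT_UPD: merge full state updates; unknown states are imported,
 * stale updates are answered with our newer copy of the state.
 */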
1232int
1233pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1234{
1235 struct pfsync_state *sa, *sp;
1236 struct pf_state_cmp id_key;
1237 struct pf_state_key *sk;
1238 struct pf_state *st;
1239 int sfail;
1240
1241 struct mbuf *mp;
1242 int len = count * sizeof(*sp);
1243 int offp, i;
1244 int s;
1245
1246 mp = m_pulldown(m, offset, len, &offp);
1247 if (mp == NULL) {
1248 V_pfsyncstats.pfsyncs_badlen++;
1249 return (-1);
1250 }
1251 sa = (struct pfsync_state *)(mp->m_data + offp);
1252
1253 s = splsoftnet();
1254#ifdef __FreeBSD__
1255 PF_LOCK();
1256#endif
1257 for (i = 0; i < count; i++) {
1258 sp = &sa[i];
1259
1260 /* check for invalid values */
1261 if (sp->timeout >= PFTM_MAX ||
1262 sp->src.state > PF_TCPS_PROXY_DST ||
1263 sp->dst.state > PF_TCPS_PROXY_DST) {
1264#ifdef __FreeBSD__
1265 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1266#else
1267 if (pf_status.debug >= PF_DEBUG_MISC) {
1268#endif
1269 printf("pfsync_input: PFSYNC_ACT_UPD: "
1270 "invalid value\n");
1271 }
1272 V_pfsyncstats.pfsyncs_badval++;
1273 continue;
1274 }
1275
1276 bcopy(sp->id, &id_key.id, sizeof(id_key.id));
1277 id_key.creatorid = sp->creatorid;
1278
1279 st = pf_find_state_byid(&id_key);
1280 if (st == NULL) {
1281 /* insert the update */
1282 if (pfsync_state_import(sp, 0))
1283 V_pfsyncstats.pfsyncs_badstate++;
1284 continue;
1285 }
1286
1287 if (ISSET(st->state_flags, PFSTATE_ACK))
1288 pfsync_deferred(st, 1);
1289
1290 sk = st->key[PF_SK_WIRE]; /* XXX right one? */
1291 sfail = 0;
1292 if (sk->proto == IPPROTO_TCP)
1293 sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
1294 else {
1295 /*
 1296 * Non-TCP protocol state machines always go
1297 * forwards
1298 */
1299 if (st->src.state > sp->src.state)
1300 sfail = 5;
1301 else if (st->dst.state > sp->dst.state)
1302 sfail = 6;
1303 }
1304
1305 if (sfail) {
1306#ifdef __FreeBSD__
1307 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1308#else
1309 if (pf_status.debug >= PF_DEBUG_MISC) {
1310#endif
1311 printf("pfsync: %s stale update (%d)"
1312 " id: %016llx creatorid: %08x\n",
1313 (sfail < 7 ? "ignoring" : "partial"),
1314 sfail, betoh64(st->id),
1315 ntohl(st->creatorid));
1316 }
1317 V_pfsyncstats.pfsyncs_stale++;
1318
1319 pfsync_update_state(st);
1320#ifdef __FreeBSD__
1321 pfsync_sendout();
1322#else
1323 schednetisr(NETISR_PFSYNC);
1324#endif
1325 continue;
1326 }
1327 pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
1328 pf_state_peer_ntoh(&sp->src, &st->src);
1329 pf_state_peer_ntoh(&sp->dst, &st->dst);
1330 st->expire = ntohl(sp->expire) + time_second;
1331 st->timeout = sp->timeout;
1332 st->pfsync_time = time_second;
1333 }
1334#ifdef __FreeBSD__
1335 PF_UNLOCK();
1336#endif
1337 splx(s);
1338
1339 return (len);
1340}
1341
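/*
 * PFSYNC_ACT_UPD_C: compressed updates keyed by state id; if the state
 * is unknown, request a full copy from the peer.
 */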
1342int
1343pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1344{
1345 struct pfsync_upd_c *ua, *up;
1346 struct pf_state_key *sk;
1347 struct pf_state_cmp id_key;
1348 struct pf_state *st;
1349
1350 int len = count * sizeof(*up);
1351 int sfail;
1352
1353 struct mbuf *mp;
1354 int offp, i;
1355 int s;
1356
1357 mp = m_pulldown(m, offset, len, &offp);
1358 if (mp == NULL) {
1359 V_pfsyncstats.pfsyncs_badlen++;
1360 return (-1);
1361 }
1362 ua = (struct pfsync_upd_c *)(mp->m_data + offp);
1363
1364 s = splsoftnet();
1365#ifdef __FreeBSD__
1366 PF_LOCK();
1367#endif
1368 for (i = 0; i < count; i++) {
1369 up = &ua[i];
1370
1371 /* check for invalid values */
1372 if (up->timeout >= PFTM_MAX ||
1373 up->src.state > PF_TCPS_PROXY_DST ||
1374 up->dst.state > PF_TCPS_PROXY_DST) {
1375#ifdef __FreeBSD__
1376 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1377#else
1378 if (pf_status.debug >= PF_DEBUG_MISC) {
1379#endif
1380 printf("pfsync_input: "
1381 "PFSYNC_ACT_UPD_C: "
1382 "invalid value\n");
1383 }
1384 V_pfsyncstats.pfsyncs_badval++;
1385 continue;
1386 }
1387
1388 bcopy(&up->id, &id_key.id, sizeof(id_key.id));
1389 id_key.creatorid = up->creatorid;
1390
1391 st = pf_find_state_byid(&id_key);
1392 if (st == NULL) {
1393 /* We don't have this state. Ask for it. */
1394 pfsync_request_update(id_key.creatorid, id_key.id);
1395 continue;
1396 }
1397
1398 if (ISSET(st->state_flags, PFSTATE_ACK))
1399 pfsync_deferred(st, 1);
1400
1401 sk = st->key[PF_SK_WIRE]; /* XXX right one? */
1402 sfail = 0;
1403 if (sk->proto == IPPROTO_TCP)
1404 sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
1405 else {
1406 /*
 1407 * Non-TCP protocol state machines always go forwards
1408 */
1409 if (st->src.state > up->src.state)
1410 sfail = 5;
1411 else if (st->dst.state > up->dst.state)
1412 sfail = 6;
1413 }
1414
1415 if (sfail) {
1416#ifdef __FreeBSD__
1417 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1418#else
1419 if (pf_status.debug >= PF_DEBUG_MISC) {
1420#endif
1421 printf("pfsync: ignoring stale update "
1422 "(%d) id: %016llx "
1423 "creatorid: %08x\n", sfail,
1424 betoh64(st->id),
1425 ntohl(st->creatorid));
1426 }
1427 V_pfsyncstats.pfsyncs_stale++;
1428
1429 pfsync_update_state(st);
1430#ifdef __FreeBSD__
1431 pfsync_sendout();
1432#else
1433 schednetisr(NETISR_PFSYNC);
1434#endif
1435 continue;
1436 }
1437 pfsync_alloc_scrub_memory(&up->dst, &st->dst);
1438 pf_state_peer_ntoh(&up->src, &st->src);
1439 pf_state_peer_ntoh(&up->dst, &st->dst);
1440 st->expire = ntohl(up->expire) + time_second;
1441 st->timeout = up->timeout;
1442 st->pfsync_time = time_second;
1443 }
1444#ifdef __FreeBSD__
1445 PF_UNLOCK();
1446#endif
1447 splx(s);
1448
1449 return (len);
1450}
1451
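/*
 * PFSYNC_ACT_UPD_REQ: the peer is asking for states. An all-zero id and
 * creator id starts a bulk update; otherwise queue an update for the
 * requested state.
 */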
1452int
1453pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1454{
1455 struct pfsync_upd_req *ur, *ura;
1456 struct mbuf *mp;
1457 int len = count * sizeof(*ur);
1458 int i, offp;
1459
1460 struct pf_state_cmp id_key;
1461 struct pf_state *st;
1462
1463 mp = m_pulldown(m, offset, len, &offp);
1464 if (mp == NULL) {
1465 V_pfsyncstats.pfsyncs_badlen++;
1466 return (-1);
1467 }
1468 ura = (struct pfsync_upd_req *)(mp->m_data + offp);
1469
1470 for (i = 0; i < count; i++) {
1471 ur = &ura[i];
1472
1473 bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
1474 id_key.creatorid = ur->creatorid;
1475
1476 if (id_key.id == 0 && id_key.creatorid == 0)
1477 pfsync_bulk_start();
1478 else {
1479 st = pf_find_state_byid(&id_key);
1480 if (st == NULL) {
1481 V_pfsyncstats.pfsyncs_badstate++;
1482 continue;
1483 }
1484 if (ISSET(st->state_flags, PFSTATE_NOSYNC))
1485 continue;
1486
1487 PF_LOCK();
1488 pfsync_update_state_req(st);
1489 PF_UNLOCK();
1490 }
1491 }
1492
1493 return (len);
1494}
1495
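/*
 * PFSYNC_ACT_DEL / PFSYNC_ACT_DEL_C: unlink the named states without
 * echoing the deletion back to the peer.
 */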
1496int
1497pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1498{
1499 struct mbuf *mp;
1500 struct pfsync_state *sa, *sp;
1501 struct pf_state_cmp id_key;
1502 struct pf_state *st;
1503 int len = count * sizeof(*sp);
1504 int offp, i;
1505 int s;
1506
1507 mp = m_pulldown(m, offset, len, &offp);
1508 if (mp == NULL) {
1509 V_pfsyncstats.pfsyncs_badlen++;
1510 return (-1);
1511 }
1512 sa = (struct pfsync_state *)(mp->m_data + offp);
1513
1514 s = splsoftnet();
1515#ifdef __FreeBSD__
1516 PF_LOCK();
1517#endif
1518 for (i = 0; i < count; i++) {
1519 sp = &sa[i];
1520
1521 bcopy(sp->id, &id_key.id, sizeof(id_key.id));
1522 id_key.creatorid = sp->creatorid;
1523
1524 st = pf_find_state_byid(&id_key);
1525 if (st == NULL) {
1526 V_pfsyncstats.pfsyncs_badstate++;
1527 continue;
1528 }
1529 SET(st->state_flags, PFSTATE_NOSYNC);
1530 pf_unlink_state(st);
1531 }
1532#ifdef __FreeBSD__
1533 PF_UNLOCK();
1534#endif
1535 splx(s);
1536
1537 return (len);
1538}
1539
1540int
1541pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1542{
1543 struct mbuf *mp;
1544 struct pfsync_del_c *sa, *sp;
1545 struct pf_state_cmp id_key;
1546 struct pf_state *st;
1547 int len = count * sizeof(*sp);
1548 int offp, i;
1549 int s;
1550
1551 mp = m_pulldown(m, offset, len, &offp);
1552 if (mp == NULL) {
1553 V_pfsyncstats.pfsyncs_badlen++;
1554 return (-1);
1555 }
1556 sa = (struct pfsync_del_c *)(mp->m_data + offp);
1557
1558 s = splsoftnet();
1559#ifdef __FreeBSD__
1560 PF_LOCK();
1561#endif
1562 for (i = 0; i < count; i++) {
1563 sp = &sa[i];
1564
1565 bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
1566 id_key.creatorid = sp->creatorid;
1567
1568 st = pf_find_state_byid(&id_key);
1569 if (st == NULL) {
1570 V_pfsyncstats.pfsyncs_badstate++;
1571 continue;
1572 }
1573
1574 SET(st->state_flags, PFSTATE_NOSYNC);
1575 pf_unlink_state(st);
1576 }
1577#ifdef __FreeBSD__
1578 PF_UNLOCK();
1579#endif
1580 splx(s);
1581
1582 return (len);
1583}
1584
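/*
 * PFSYNC_ACT_BUS: bulk update status from the peer; only interesting
 * while we are waiting for a bulk update we requested.
 */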
1585int
1586pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1587{
1588#ifdef __FreeBSD__
1589 struct pfsync_softc *sc = V_pfsyncif;
1590#else
1591 struct pfsync_softc *sc = pfsyncif;
1592#endif
1593 struct pfsync_bus *bus;
1594 struct mbuf *mp;
1595 int len = count * sizeof(*bus);
1596 int offp;
1597
1598 /* If we're not waiting for a bulk update, who cares. */
1599 if (sc->sc_ureq_sent == 0)
1600 return (len);
1601
1602 mp = m_pulldown(m, offset, len, &offp);
1603 if (mp == NULL) {
1604 V_pfsyncstats.pfsyncs_badlen++;
1605 return (-1);
1606 }
1607 bus = (struct pfsync_bus *)(mp->m_data + offp);
1608
1609 switch (bus->status) {
1610 case PFSYNC_BUS_START:
1611#ifdef __FreeBSD__
1612 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
1613 V_pfsyncif);
1614#else
1615 timeout_add_sec(&sc->sc_bulkfail_tmo, 5); /* XXX magic */
1616#endif
1617#ifdef XXX
1618 pf_pool_limits[PF_LIMIT_STATES].limit /
1619 (PFSYNC_BULKPACKETS * sc->sc_maxcount));
1620#endif
1621#ifdef __FreeBSD__
1622 if (V_pf_status.debug >= PF_DEBUG_MISC)
1623#else
1624 if (pf_status.debug >= PF_DEBUG_MISC)
1625#endif
1626 printf("pfsync: received bulk update start\n");
1627 break;
1628
1629 case PFSYNC_BUS_END:
1630 if (time_uptime - ntohl(bus->endtime) >=
1631 sc->sc_ureq_sent) {
1632 /* that's it, we're happy */
1633 sc->sc_ureq_sent = 0;
1634 sc->sc_bulk_tries = 0;
1635 timeout_del(&sc->sc_bulkfail_tmo);
1636#ifdef __FreeBSD__
1637 if (!sc->pfsync_sync_ok && carp_demote_adj_p)
1638 (*carp_demote_adj_p)(-V_pfsync_carp_adj,
1639 "pfsync bulk done");
1640 sc->pfsync_sync_ok = 1;
1641#else
1642#if NCARP > 0
1643 if (!pfsync_sync_ok)
1644 carp_group_demote_adj(&sc->sc_if, -1);
1645#endif
1646 pfsync_sync_ok = 1;
1647#endif
1648#ifdef __FreeBSD__
1649 if (V_pf_status.debug >= PF_DEBUG_MISC)
1650#else
1651 if (pf_status.debug >= PF_DEBUG_MISC)
1652#endif
1653 printf("pfsync: received valid "
1654 "bulk update end\n");
1655 } else {
1656#ifdef __FreeBSD__
1657 if (V_pf_status.debug >= PF_DEBUG_MISC)
1658#else
1659 if (pf_status.debug >= PF_DEBUG_MISC)
1660#endif
1661 printf("pfsync: received invalid "
1662 "bulk update end: bad timestamp\n");
1663 }
1664 break;
1665 }
1666
1667 return (len);
1668}
1669
1670int
1671pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1672{
1673 int len = count * sizeof(struct pfsync_tdb);
1674
1675#if defined(IPSEC)
1676 struct pfsync_tdb *tp;
1677 struct mbuf *mp;
1678 int offp;
1679 int i;
1680 int s;
1681
1682 mp = m_pulldown(m, offset, len, &offp);
1683 if (mp == NULL) {
1684 V_pfsyncstats.pfsyncs_badlen++;
1685 return (-1);
1686 }
1687 tp = (struct pfsync_tdb *)(mp->m_data + offp);
1688
1689 s = splsoftnet();
1690#ifdef __FreeBSD__
1691 PF_LOCK();
1692#endif
1693 for (i = 0; i < count; i++)
1694 pfsync_update_net_tdb(&tp[i]);
1695#ifdef __FreeBSD__
1696 PF_UNLOCK();
1697#endif
1698 splx(s);
1699#endif
1700
1701 return (len);
1702}
1703
1704#if defined(IPSEC)
1705/* Update an in-kernel tdb. Silently fail if no tdb is found. */
1706void
1707pfsync_update_net_tdb(struct pfsync_tdb *pt)
1708{
1709 struct tdb *tdb;
1710 int s;
1711
1712 /* check for invalid values */
1713 if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1714 (pt->dst.sa.sa_family != AF_INET &&
1715 pt->dst.sa.sa_family != AF_INET6))
1716 goto bad;
1717
1718 s = spltdb();
1719 tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
1720 if (tdb) {
1721 pt->rpl = ntohl(pt->rpl);
1722 pt->cur_bytes = betoh64(pt->cur_bytes);
1723
1724 /* Neither replay nor byte counter should ever decrease. */
1725 if (pt->rpl < tdb->tdb_rpl ||
1726 pt->cur_bytes < tdb->tdb_cur_bytes) {
1727 splx(s);
1728 goto bad;
1729 }
1730
1731 tdb->tdb_rpl = pt->rpl;
1732 tdb->tdb_cur_bytes = pt->cur_bytes;
1733 }
1734 splx(s);
1735 return;
1736
1737bad:
1738#ifdef __FreeBSD__
1739 if (V_pf_status.debug >= PF_DEBUG_MISC)
1740#else
1741 if (pf_status.debug >= PF_DEBUG_MISC)
1742#endif
1743 printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1744 "invalid value\n");
1745 V_pfsyncstats.pfsyncs_badstate++;
1746 return;
1747}
1748#endif
1749
1750
1751int
1752pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1753{
1754 /* check if we are at the right place in the packet */
1755 if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
1756 V_pfsyncstats.pfsyncs_badact++;
1757
1758 /* we're done. free and let the caller return */
1759 m_freem(m);
1760 return (-1);
1761}
1762
1763int
1764pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1765{
1766 V_pfsyncstats.pfsyncs_badact++;
1767
1768 m_freem(m);
1769 return (-1);
1770}
1771
1772int
1773pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
1774#ifdef __FreeBSD__
1775 struct route *rt)
1776#else
1777 struct rtentry *rt)
1778#endif
1779{
1780 m_freem(m);
1781 return (0);
1782}
1783
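/*
 * Interface ioctls: SIOCSETPFSYNC selects the sync interface and peer
 * (joining the pfsync multicast group and requesting a bulk update),
 * SIOCGETPFSYNC reports the current configuration.
 */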
1784/* ARGSUSED */
1785int
1786pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1787{
1788#ifndef __FreeBSD__
1789 struct proc *p = curproc;
1790#endif
1791 struct pfsync_softc *sc = ifp->if_softc;
1792 struct ifreq *ifr = (struct ifreq *)data;
1793 struct ip_moptions *imo = &sc->sc_imo;
1794 struct pfsyncreq pfsyncr;
1795 struct ifnet *sifp;
1796 struct ip *ip;
1797 int s, error;
1798
1799 switch (cmd) {
1800#if 0
1801 case SIOCSIFADDR:
1802 case SIOCAIFADDR:
1803 case SIOCSIFDSTADDR:
1804#endif
1805 case SIOCSIFFLAGS:
1806#ifdef __FreeBSD__
1807 if (ifp->if_flags & IFF_UP)
1808 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1809 else
1810 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1811#else
1812 if (ifp->if_flags & IFF_UP)
1813 ifp->if_flags |= IFF_RUNNING;
1814 else
1815 ifp->if_flags &= ~IFF_RUNNING;
1816#endif
1817 break;
1818 case SIOCSIFMTU:
1819 if (ifr->ifr_mtu <= PFSYNC_MINPKT)
1820 return (EINVAL);
1821 if (ifr->ifr_mtu > MCLBYTES) /* XXX could be bigger */
1822 ifr->ifr_mtu = MCLBYTES;
1823 if (ifr->ifr_mtu < ifp->if_mtu) {
1824 s = splnet();
1825#ifdef __FreeBSD__
1826 PF_LOCK();
1827#endif
1828 pfsync_sendout();
1829#ifdef __FreeBSD__
1830 PF_UNLOCK();
1831#endif
1832 splx(s);
1833 }
1834 ifp->if_mtu = ifr->ifr_mtu;
1835 break;
1836 case SIOCGETPFSYNC:
1837 bzero(&pfsyncr, sizeof(pfsyncr));
1838 if (sc->sc_sync_if) {
1839 strlcpy(pfsyncr.pfsyncr_syncdev,
1840 sc->sc_sync_if->if_xname, IFNAMSIZ);
1841 }
1842 pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
1843 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1844 return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));
1845
1846 case SIOCSETPFSYNC:
1847#ifdef __FreeBSD__
1848 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1849#else
1850 if ((error = suser(p, p->p_acflag)) != 0)
1851#endif
1852 return (error);
1853 if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
1854 return (error);
1855
1856#ifdef __FreeBSD__
1857 PF_LOCK();
1858#endif
1859 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
1860#ifdef __FreeBSD__
1861 sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
1862#else
1863 sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
1864#endif
1865 else
1866 sc->sc_sync_peer.s_addr =
1867 pfsyncr.pfsyncr_syncpeer.s_addr;
1868
1869 if (pfsyncr.pfsyncr_maxupdates > 255)
1870#ifdef __FreeBSD__
1871 {
1872 PF_UNLOCK();
1873#endif
1874 return (EINVAL);
1875#ifdef __FreeBSD__
1876 }
1877#endif
1878 sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
1879
1880 if (pfsyncr.pfsyncr_syncdev[0] == 0) {
1881 sc->sc_sync_if = NULL;
1882#ifdef __FreeBSD__
1883 PF_UNLOCK();
1884#endif
1885 if (imo->imo_num_memberships > 0) {
1886 in_delmulti(imo->imo_membership[
1887 --imo->imo_num_memberships]);
1888 imo->imo_multicast_ifp = NULL;
1889 }
1890 break;
1891 }
1892
1893#ifdef __FreeBSD__
1894 PF_UNLOCK();
1895#endif
1896 if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
1897 return (EINVAL);
1898
1899#ifdef __FreeBSD__
1900 PF_LOCK();
1901#endif
1902 s = splnet();
1903#ifdef __FreeBSD__
1904 if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
1905#else
1906 if (sifp->if_mtu < sc->sc_if.if_mtu ||
1907#endif
1908 (sc->sc_sync_if != NULL &&
1909 sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
1910 sifp->if_mtu < MCLBYTES - sizeof(struct ip))
1911 pfsync_sendout();
1912 sc->sc_sync_if = sifp;
1913
1914 if (imo->imo_num_memberships > 0) {
1915#ifdef __FreeBSD__
1916 PF_UNLOCK();
1917#endif
1918 in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
1919#ifdef __FreeBSD__
1920 PF_LOCK();
1921#endif
1922 imo->imo_multicast_ifp = NULL;
1923 }
1924
1925 if (sc->sc_sync_if &&
1926#ifdef __FreeBSD__
1927 sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
1928#else
1929 sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1930#endif
1931 struct in_addr addr;
1932
1933 if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
1934 sc->sc_sync_if = NULL;
1935#ifdef __FreeBSD__
1936 PF_UNLOCK();
1937#endif
1938 splx(s);
1939 return (EADDRNOTAVAIL);
1940 }
1941
1942#ifdef __FreeBSD__
1943 addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
1944#else
1945 addr.s_addr = INADDR_PFSYNC_GROUP;
1946#endif
1947
1948#ifdef __FreeBSD__
1949 PF_UNLOCK();
1950#endif
1951 if ((imo->imo_membership[0] =
1952 in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
1953 sc->sc_sync_if = NULL;
1954 splx(s);
1955 return (ENOBUFS);
1956 }
1957#ifdef __FreeBSD__
1958 PF_LOCK();
1959#endif
1960 imo->imo_num_memberships++;
1961 imo->imo_multicast_ifp = sc->sc_sync_if;
1962 imo->imo_multicast_ttl = PFSYNC_DFLTTL;
1963 imo->imo_multicast_loop = 0;
1964 }
1965
1966 ip = &sc->sc_template;
1967 bzero(ip, sizeof(*ip));
1968 ip->ip_v = IPVERSION;
1969 ip->ip_hl = sizeof(sc->sc_template) >> 2;
1970 ip->ip_tos = IPTOS_LOWDELAY;
1971 /* len and id are set later */
1972#ifdef __FreeBSD__
1973 ip->ip_off = IP_DF;
1974#else
1975 ip->ip_off = htons(IP_DF);
1976#endif
1977 ip->ip_ttl = PFSYNC_DFLTTL;
1978 ip->ip_p = IPPROTO_PFSYNC;
1979 ip->ip_src.s_addr = INADDR_ANY;
1980 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
1981
1982 if (sc->sc_sync_if) {
1983 /* Request a full state table update. */
1984 sc->sc_ureq_sent = time_uptime;
1985#ifdef __FreeBSD__
1986 if (sc->pfsync_sync_ok && carp_demote_adj_p)
1987 (*carp_demote_adj_p)(V_pfsync_carp_adj,
1988 "pfsync bulk start");
1989 sc->pfsync_sync_ok = 0;
1990#else
1991#if NCARP > 0
1992 if (pfsync_sync_ok)
1993 carp_group_demote_adj(&sc->sc_if, 1);
1994#endif
1995 pfsync_sync_ok = 0;
1996#endif
1997#ifdef __FreeBSD__
1998 if (V_pf_status.debug >= PF_DEBUG_MISC)
1999#else
2000 if (pf_status.debug >= PF_DEBUG_MISC)
2001#endif
2002 printf("pfsync: requesting bulk update\n");
2003#ifdef __FreeBSD__
2004 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
2005 pfsync_bulk_fail, V_pfsyncif);
2006#else
2007 timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
2008#endif
2009 pfsync_request_update(0, 0);
2010 }
2011#ifdef __FreeBSD__
2012 PF_UNLOCK();
2013#endif
2014 splx(s);
2015
2016 break;
2017
2018 default:
2019 return (ENOTTY);
2020 }
2021
2022 return (0);
2023}
2024
2025int
2026pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
2027{
2028 struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);
2029
2030 pfsync_state_export(sp, st);
2031
2032 return (sizeof(*sp));
2033}
2034
2035int
2036pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
2037{
2038 struct pfsync_ins_ack *iack =
2039 (struct pfsync_ins_ack *)(m->m_data + offset);
2040
2041 iack->id = st->id;
2042 iack->creatorid = st->creatorid;
2043
2044 return (sizeof(*iack));
2045}
2046
2047int
2048pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
2049{
2050 struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);
2051
2052 up->id = st->id;
2053 pf_state_peer_hton(&st->src, &up->src);
2054 pf_state_peer_hton(&st->dst, &up->dst);
2055 up->creatorid = st->creatorid;
2056
2057 up->expire = pf_state_expires(st);
2058 if (up->expire <= time_second)
2059 up->expire = htonl(0);
2060 else
2061 up->expire = htonl(up->expire - time_second);
2062 up->timeout = st->timeout;
2063
2064 bzero(up->_pad, sizeof(up->_pad)); /* XXX */
2065
2066 return (sizeof(*up));
2067}
2068
2069int
2070pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
2071{
2072 struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);
2073
2074 dp->id = st->id;
2075 dp->creatorid = st->creatorid;
2076
2077 SET(st->state_flags, PFSTATE_NOSYNC);
2078
2079 return (sizeof(*dp));
2080}
2081
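/*
 * pfsync_drop() throws away everything scheduled for the next packet:
 * queued states are marked PFSYNC_S_NONE, pending update requests are
 * returned to the pool, any "plus" region is forgotten, and sc_len is
 * reset to the empty-packet size (PFSYNC_MINPKT).
 */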
2082void
2083pfsync_drop(struct pfsync_softc *sc)
2084{
2085 struct pf_state *st;
2086 struct pfsync_upd_req_item *ur;
2087#ifdef notyet
2088 struct tdb *t;
2089#endif
2090 int q;
2091
2092 for (q = 0; q < PFSYNC_S_COUNT; q++) {
2093 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2094 continue;
2095
2096 TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
2097#ifdef PFSYNC_DEBUG
2098#ifdef __FreeBSD__
2099 KASSERT(st->sync_state == q,
2100 ("%s: st->sync_state == q",
2101 __FUNCTION__));
2102#else
2103 KASSERT(st->sync_state == q);
2104#endif
2105#endif
2106 st->sync_state = PFSYNC_S_NONE;
2107 }
2108 TAILQ_INIT(&sc->sc_qs[q]);
2109 }
2110
2111 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
2112 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
2113 pool_put(&sc->sc_pool, ur);
2114 }
2115
2116 sc->sc_plus = NULL;
2117
2118#ifdef notyet
2119 if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
2120 TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
2121 CLR(t->tdb_flags, TDBF_PFSYNC);
2122
2123 TAILQ_INIT(&sc->sc_tdb_q);
2124 }
2125#endif
2126
2127 sc->sc_len = PFSYNC_MINPKT;
2128}
2129
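/*
 * pfsync_sendout() serializes everything scheduled in the softc into a
 * single mbuf and hands it to the sync interface.  The layout matches the
 * sc_len accounting done elsewhere:
 *
 *	struct ip | struct pfsync_header
 *	  | per-queue subheader + records (pfsync_qs[q].write())
 *	  | optional PFSYNC_ACT_UPD_REQ subheader + requests
 *	  | optional "plus" region (clear / bulk status messages)
 *	  | PFSYNC_ACT_EOF subheader
 */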
2130void
2131pfsync_sendout(void)
2132{
2133#ifdef __FreeBSD__
2134 struct pfsync_softc *sc = V_pfsyncif;
2135#else
2136 struct pfsync_softc *sc = pfsyncif;
2137#endif
2138#if NBPFILTER > 0
2139#ifdef __FreeBSD__
2140 struct ifnet *ifp = sc->sc_ifp;
2141#else
2142 struct ifnet *ifp = &sc->sc_if;
2143#endif
2144#endif
2145 struct mbuf *m;
2146 struct ip *ip;
2147 struct pfsync_header *ph;
2148 struct pfsync_subheader *subh;
2149 struct pf_state *st;
2150 struct pfsync_upd_req_item *ur;
2151#ifdef notyet
2152 struct tdb *t;
2153#endif
2154#ifdef __FreeBSD__
2155 size_t pktlen;
2156 int dummy_error;
2157#endif
2158 int offset;
2159 int q, count = 0;
2160
2161#ifdef __FreeBSD__
2162 PF_LOCK_ASSERT();
2163#else
2164 splassert(IPL_NET);
2165#endif
2166
2167 if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
2168 return;
2169
2170#if NBPFILTER > 0
2171 if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
2172#else
2173 if (sc->sc_sync_if == NULL) {
2174#endif
2175 pfsync_drop(sc);
2176 return;
2177 }
2178
2179 MGETHDR(m, M_DONTWAIT, MT_DATA);
2180 if (m == NULL) {
2181#ifdef __FreeBSD__
2182 sc->sc_ifp->if_oerrors++;
2183#else
2184 sc->sc_if.if_oerrors++;
2185#endif
2186 V_pfsyncstats.pfsyncs_onomem++;
2187 pfsync_drop(sc);
2188 return;
2189 }
2190
2191#ifdef __FreeBSD__
2192 pktlen = max_linkhdr + sc->sc_len;
2193 if (pktlen > MHLEN) {
2194 /* Find the right pool to allocate from. */
2195 /* XXX: This is ugly. */
2196 m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE :
2197 pktlen <= MCLBYTES ? MCLBYTES :
2198#if MJUMPAGESIZE != MCLBYTES
2199 pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
2200#endif
2201 pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
2202#else
2203 if (max_linkhdr + sc->sc_len > MHLEN) {
2204 MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
2205#endif
2206 if (!ISSET(m->m_flags, M_EXT)) {
2207 m_free(m);
2208#ifdef __FreeBSD__
2209 sc->sc_ifp->if_oerrors++;
2210#else
2211 sc->sc_if.if_oerrors++;
2212#endif
2213 V_pfsyncstats.pfsyncs_onomem++;
2214 pfsync_drop(sc);
2215 return;
2216 }
2217 }
2218 m->m_data += max_linkhdr;
2219 m->m_len = m->m_pkthdr.len = sc->sc_len;
2220
2221 /* build the ip header */
2222 ip = (struct ip *)m->m_data;
2223 bcopy(&sc->sc_template, ip, sizeof(*ip));
2224 offset = sizeof(*ip);
2225
2226#ifdef __FreeBSD__
2227 ip->ip_len = m->m_pkthdr.len;
2228#else
2229 ip->ip_len = htons(m->m_pkthdr.len);
2230#endif
2231 ip->ip_id = htons(ip_randomid());
2232
2233 /* build the pfsync header */
2234 ph = (struct pfsync_header *)(m->m_data + offset);
2235 bzero(ph, sizeof(*ph));
2236 offset += sizeof(*ph);
2237
2238 ph->version = PFSYNC_VERSION;
2239 ph->len = htons(sc->sc_len - sizeof(*ip));
2240#ifdef __FreeBSD__
2241 bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
2242#else
2243 bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
2244#endif
2245
2246 /* walk the queues */
2247 for (q = 0; q < PFSYNC_S_COUNT; q++) {
2248 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2249 continue;
2250
2251 subh = (struct pfsync_subheader *)(m->m_data + offset);
2252 offset += sizeof(*subh);
2253
2254 count = 0;
2255 TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
2256#ifdef PFSYNC_DEBUG
2257#ifdef __FreeBSD__
2258 KASSERT(st->sync_state == q,
2259 ("%s: st->sync_state == q",
2260 __FUNCTION__));
2261#else
2262 KASSERT(st->sync_state == q);
2263#endif
2264#endif
2265
2266 offset += pfsync_qs[q].write(st, m, offset);
2267 st->sync_state = PFSYNC_S_NONE;
2268 count++;
2269 }
2270 TAILQ_INIT(&sc->sc_qs[q]);
2271
2272 bzero(subh, sizeof(*subh));
2273 subh->action = pfsync_qs[q].action;
2274 subh->count = htons(count);
2275 }
2276
2277 if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
2278 subh = (struct pfsync_subheader *)(m->m_data + offset);
2279 offset += sizeof(*subh);
2280
2281 count = 0;
2282 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
2283 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
2284
2285 bcopy(&ur->ur_msg, m->m_data + offset,
2286 sizeof(ur->ur_msg));
2287 offset += sizeof(ur->ur_msg);
2288
2289 pool_put(&sc->sc_pool, ur);
2290
2291 count++;
2292 }
2293
2294 bzero(subh, sizeof(*subh));
2295 subh->action = PFSYNC_ACT_UPD_REQ;
2296 subh->count = htons(count);
2297 }
2298
2299 /* has someone built a custom region for us to add? */
2300 if (sc->sc_plus != NULL) {
2301 bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
2302 offset += sc->sc_pluslen;
2303
2304 sc->sc_plus = NULL;
2305 }
2306
2307#ifdef notyet
2308 if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
2309 subh = (struct pfsync_subheader *)(m->m_data + offset);
2310 offset += sizeof(*subh);
2311
2312 count = 0;
2313 TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
2314 offset += pfsync_out_tdb(t, m, offset);
2315 CLR(t->tdb_flags, TDBF_PFSYNC);
2316
2317 count++;
2318 }
2319 TAILQ_INIT(&sc->sc_tdb_q);
2320
2321 bzero(subh, sizeof(*subh));
2322 subh->action = PFSYNC_ACT_TDB;
2323 subh->count = htons(count);
2324 }
2325#endif
2326
2327 subh = (struct pfsync_subheader *)(m->m_data + offset);
2328 offset += sizeof(*subh);
2329
2330 bzero(subh, sizeof(*subh));
2331 subh->action = PFSYNC_ACT_EOF;
2332 subh->count = htons(1);
2333
2334 /* XXX write checksum in EOF here */
2335
2336 /* we're done, let's put it on the wire */
2337#if NBPFILTER > 0
2338 if (ifp->if_bpf) {
2339 m->m_data += sizeof(*ip);
2340 m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
2341#ifdef __FreeBSD__
2342 BPF_MTAP(ifp, m);
2343#else
2344 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2345#endif
2346 m->m_data -= sizeof(*ip);
2347 m->m_len = m->m_pkthdr.len = sc->sc_len;
2348 }
2349
2350 if (sc->sc_sync_if == NULL) {
2351 sc->sc_len = PFSYNC_MINPKT;
2352 m_freem(m);
2353 return;
2354 }
2355#endif
2356
2357#ifdef __FreeBSD__
2358 sc->sc_ifp->if_opackets++;
2359 sc->sc_ifp->if_obytes += m->m_pkthdr.len;
2360 sc->sc_len = PFSYNC_MINPKT;
2361
2362 IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
2363 schednetisr(NETISR_PFSYNC);
2364#else
2365 sc->sc_if.if_opackets++;
2366 sc->sc_if.if_obytes += m->m_pkthdr.len;
2367
2368 if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
2369 pfsyncstats.pfsyncs_opackets++;
2370 else
2371 pfsyncstats.pfsyncs_oerrors++;
2372
2373 /* start again */
2374 sc->sc_len = PFSYNC_MINPKT;
2375#endif
2376}
2377
2378void
2379pfsync_insert_state(struct pf_state *st)
2380{
2381#ifdef __FreeBSD__
2382 struct pfsync_softc *sc = V_pfsyncif;
2383#else
2384 struct pfsync_softc *sc = pfsyncif;
2385#endif
2386
2387#ifdef __FreeBSD__
2388 PF_LOCK_ASSERT();
2389#else
2390 splassert(IPL_SOFTNET);
2391#endif
2392
2393 if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
2394 st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
2395 SET(st->state_flags, PFSTATE_NOSYNC);
2396 return;
2397 }
2398
2399 if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
2400 return;
2401
2402#ifdef PFSYNC_DEBUG
2403#ifdef __FreeBSD__
2404 KASSERT(st->sync_state == PFSYNC_S_NONE,
2405 ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
2406#else
2407 KASSERT(st->sync_state == PFSYNC_S_NONE);
2408#endif
2409#endif
2410
2411 if (sc->sc_len == PFSYNC_MINPKT)
2412#ifdef __FreeBSD__
2413 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2414 V_pfsyncif);
2415#else
2416 timeout_add_sec(&sc->sc_tmo, 1);
2417#endif
2418
2419 pfsync_q_ins(st, PFSYNC_S_INS);
2420
2421 if (ISSET(st->state_flags, PFSTATE_ACK))
2422#ifdef __FreeBSD__
2423 pfsync_sendout();
2424#else
2425 schednetisr(NETISR_PFSYNC);
2426#endif
2427 else
2428 st->sync_updates = 0;
2429}
2430
2431int defer = 10;
2432
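/*
 * Deferral: the initial packet of a freshly created state is held for up
 * to "defer" ticks, or until the peer acknowledges the insert, the usual
 * motivation being that the peer should learn about the state before it
 * can see reply traffic.  At most 128 packets are held at once;
 * pfsync_undefer() either drops the deferred mbuf or transmits it.
 */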
2433int
2434pfsync_defer(struct pf_state *st, struct mbuf *m)
2435{
2436#ifdef __FreeBSD__
2437 struct pfsync_softc *sc = V_pfsyncif;
2438#else
2439 struct pfsync_softc *sc = pfsyncif;
2440#endif
2441 struct pfsync_deferral *pd;
2442
2443#ifdef __FreeBSD__
2444 PF_LOCK_ASSERT();
2445#else
2446 splassert(IPL_SOFTNET);
2447#endif
2448
2449 if (sc->sc_deferred >= 128)
2450 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
2451
2452 pd = pool_get(&sc->sc_pool, M_NOWAIT);
2453 if (pd == NULL)
2454 return (0);
2455 sc->sc_deferred++;
2456
2457#ifdef __FreeBSD__
2458 m->m_flags |= M_SKIP_FIREWALL;
2459#else
2460 m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2461#endif
2462 SET(st->state_flags, PFSTATE_ACK);
2463
2464 pd->pd_st = st;
2465 pd->pd_m = m;
2466
2467 TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
2468#ifdef __FreeBSD__
2469 callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
2470 callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
2471 pd);
2472#else
2473 timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
2474 timeout_add(&pd->pd_tmo, defer);
2475#endif
2476
2477 return (1);
2478}
2479
2480void
2481pfsync_undefer(struct pfsync_deferral *pd, int drop)
2482{
2483#ifdef __FreeBSD__
2484 struct pfsync_softc *sc = V_pfsyncif;
2485#else
2486 struct pfsync_softc *sc = pfsyncif;
2487#endif
2488 int s;
2489
2490#ifdef __FreeBSD__
2491 PF_LOCK_ASSERT();
2492#else
2493 splassert(IPL_SOFTNET);
2494#endif
2495
2496 TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
2497 sc->sc_deferred--;
2498
2499 CLR(pd->pd_st->state_flags, PFSTATE_ACK);
2500 timeout_del(&pd->pd_tmo); /* bah */
2501 if (drop)
2502 m_freem(pd->pd_m);
2503 else {
2504 s = splnet();
2505#ifdef __FreeBSD__
2506 /* XXX: use pf_defered?! */
2507 PF_UNLOCK();
2508#endif
2509 ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
2510 (void *)NULL, (void *)NULL);
2511#ifdef __FreeBSD__
2512 PF_LOCK();
2513#endif
2514 splx(s);
2515 }
2516
2517 pool_put(&sc->sc_pool, pd);
2518}
2519
2520void
2521pfsync_defer_tmo(void *arg)
2522{
2523#if defined(__FreeBSD__) && defined(VIMAGE)
2524 struct pfsync_deferral *pd = arg;
2525#endif
2526 int s;
2527
2528 s = splsoftnet();
2529#ifdef __FreeBSD__
2530 CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
2531 PF_LOCK();
2532#endif
2533 pfsync_undefer(arg, 0);
2534#ifdef __FreeBSD__
2535 PF_UNLOCK();
2536 CURVNET_RESTORE();
2537#endif
2538 splx(s);
2539}
2540
2541void
2542pfsync_deferred(struct pf_state *st, int drop)
2543{
2544#ifdef __FreeBSD__
2545 struct pfsync_softc *sc = V_pfsyncif;
2546#else
2547 struct pfsync_softc *sc = pfsyncif;
2548#endif
2549 struct pfsync_deferral *pd;
2550
2551 TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
2552 if (pd->pd_st == st) {
2553 pfsync_undefer(pd, drop);
2554 return;
2555 }
2556 }
2557
2558	panic("pfsync_deferred: unable to find deferred state");
2559}
2560
2561u_int pfsync_upds = 0;
2562
2563void
2564pfsync_update_state(struct pf_state *st)
2565{
2566#ifdef __FreeBSD__
2567 struct pfsync_softc *sc = V_pfsyncif;
2568#else
2569 struct pfsync_softc *sc = pfsyncif;
2570#endif
2571 int sync = 0;
2572
2573#ifdef __FreeBSD__
2574 PF_LOCK_ASSERT();
2575#else
2576 splassert(IPL_SOFTNET);
2577#endif
2578
2579 if (sc == NULL)
2580 return;
2581
2582 if (ISSET(st->state_flags, PFSTATE_ACK))
2583 pfsync_deferred(st, 0);
2584 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2585 if (st->sync_state != PFSYNC_S_NONE)
2586 pfsync_q_del(st);
2587 return;
2588 }
2589
2590 if (sc->sc_len == PFSYNC_MINPKT)
2591#ifdef __FreeBSD__
2592 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2593 V_pfsyncif);
2594#else
2595 timeout_add_sec(&sc->sc_tmo, 1);
2596#endif
2597
2598 switch (st->sync_state) {
2599 case PFSYNC_S_UPD_C:
2600 case PFSYNC_S_UPD:
2601 case PFSYNC_S_INS:
2602 /* we're already handling it */
2603
2604 st->sync_updates++;
2605 if (st->sync_updates >= sc->sc_maxupdates)
2606 sync = 1;
2607 break;
2608
2609 case PFSYNC_S_IACK:
2610 pfsync_q_del(st);
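		/* FALLTHROUGH */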
2611 case PFSYNC_S_NONE:
2612 pfsync_q_ins(st, PFSYNC_S_UPD_C);
2613 st->sync_updates = 0;
2614 break;
2615
2616 default:
2617 panic("pfsync_update_state: unexpected sync state %d",
2618 st->sync_state);
2619 }
2620
2621 if (sync || (time_second - st->pfsync_time) < 2) {
2622 pfsync_upds++;
2623#ifdef __FreeBSD__
2624 pfsync_sendout();
2625#else
2626 schednetisr(NETISR_PFSYNC);
2627#endif
2628 }
2629}
2630
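/*
 * Queue a PFSYNC_ACT_UPD_REQ for (creatorid, id).  A request with
 * creatorid 0 and id 0 asks the peer for a full bulk update, which is how
 * the ioctl and bulk-fail paths above use it.
 */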
2631void
2632pfsync_request_update(u_int32_t creatorid, u_int64_t id)
2633{
2634#ifdef __FreeBSD__
2635 struct pfsync_softc *sc = V_pfsyncif;
2636#else
2637 struct pfsync_softc *sc = pfsyncif;
2638#endif
2639 struct pfsync_upd_req_item *item;
2640 size_t nlen = sizeof(struct pfsync_upd_req);
2641 int s;
2642
2643 PF_LOCK_ASSERT();
2644
2645 /*
2646	 * This code does nothing to prevent multiple update requests from
2647	 * being generated for the same state.
2648 */
2649
2650 item = pool_get(&sc->sc_pool, PR_NOWAIT);
2651 if (item == NULL) {
2652 /* XXX stats */
2653 return;
2654 }
2655
2656 item->ur_msg.id = id;
2657 item->ur_msg.creatorid = creatorid;
2658
2659 if (TAILQ_EMPTY(&sc->sc_upd_req_list))
2660 nlen += sizeof(struct pfsync_subheader);
2661
2662#ifdef __FreeBSD__
2663 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
2664#else
2665 if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2666#endif
2667 s = splnet();
2668 pfsync_sendout();
2669 splx(s);
2670
2671 nlen = sizeof(struct pfsync_subheader) +
2672 sizeof(struct pfsync_upd_req);
2673 }
2674
2675 TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
2676 sc->sc_len += nlen;
2677
2678#ifdef __FreeBSD__
2679 pfsync_sendout();
2680#else
2681 schednetisr(NETISR_PFSYNC);
2682#endif
2683}
2684
2685void
2686pfsync_update_state_req(struct pf_state *st)
2687{
2688#ifdef __FreeBSD__
2689 struct pfsync_softc *sc = V_pfsyncif;
2690#else
2691 struct pfsync_softc *sc = pfsyncif;
2692#endif
2693
2694 PF_LOCK_ASSERT();
2695
2696 if (sc == NULL)
2697		panic("pfsync_update_state_req: nonexistent instance");
2698
2699 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2700 if (st->sync_state != PFSYNC_S_NONE)
2701 pfsync_q_del(st);
2702 return;
2703 }
2704
2705 switch (st->sync_state) {
2706 case PFSYNC_S_UPD_C:
2707 case PFSYNC_S_IACK:
2708 pfsync_q_del(st);
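		/* FALLTHROUGH */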
2709 case PFSYNC_S_NONE:
2710 pfsync_q_ins(st, PFSYNC_S_UPD);
2711#ifdef __FreeBSD__
2712 pfsync_sendout();
2713#else
2714 schednetisr(NETISR_PFSYNC);
2715#endif
2716 return;
2717
2718 case PFSYNC_S_INS:
2719 case PFSYNC_S_UPD:
2720 case PFSYNC_S_DEL:
2721 /* we're already handling it */
2722 return;
2723
2724 default:
2725 panic("pfsync_update_state_req: unexpected sync state %d",
2726 st->sync_state);
2727 }
2728}
2729
2730void
2731pfsync_delete_state(struct pf_state *st)
2732{
2733#ifdef __FreeBSD__
2734 struct pfsync_softc *sc = V_pfsyncif;
2735#else
2736 struct pfsync_softc *sc = pfsyncif;
2737#endif
2738
2739#ifdef __FreeBSD__
2740 PF_LOCK_ASSERT();
2741#else
2742 splassert(IPL_SOFTNET);
2743#endif
2744
2745 if (sc == NULL)
2746 return;
2747
2748 if (ISSET(st->state_flags, PFSTATE_ACK))
2749 pfsync_deferred(st, 1);
2750 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2751 if (st->sync_state != PFSYNC_S_NONE)
2752 pfsync_q_del(st);
2753 return;
2754 }
2755
2756 if (sc->sc_len == PFSYNC_MINPKT)
2757#ifdef __FreeBSD__
2758 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2759 V_pfsyncif);
2760#else
2761 timeout_add_sec(&sc->sc_tmo, 1);
2762#endif
2763
2764 switch (st->sync_state) {
2765 case PFSYNC_S_INS:
2766 /* we never got to tell the world so just forget about it */
2767 pfsync_q_del(st);
2768 return;
2769
2770 case PFSYNC_S_UPD_C:
2771 case PFSYNC_S_UPD:
2772 case PFSYNC_S_IACK:
2773 pfsync_q_del(st);
2774 /* FALLTHROUGH to putting it on the del list */
2775
2776 case PFSYNC_S_NONE:
2777 pfsync_q_ins(st, PFSYNC_S_DEL);
2778 return;
2779
2780 default:
2781 panic("pfsync_delete_state: unexpected sync state %d",
2782 st->sync_state);
2783 }
2784}
2785
2786void
2787pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2788{
2789 struct {
2790 struct pfsync_subheader subh;
2791 struct pfsync_clr clr;
2792 } __packed r;
2793
2794#ifdef __FreeBSD__
2795 struct pfsync_softc *sc = V_pfsyncif;
2796#else
2797 struct pfsync_softc *sc = pfsyncif;
2798#endif
2799
2800#ifdef __FreeBSD__
2801 PF_LOCK_ASSERT();
2802#else
2803 splassert(IPL_SOFTNET);
2804#endif
2805
2806 if (sc == NULL)
2807 return;
2808
2809 bzero(&r, sizeof(r));
2810
2811 r.subh.action = PFSYNC_ACT_CLR;
2812 r.subh.count = htons(1);
2813
2814 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2815 r.clr.creatorid = creatorid;
2816
2817 pfsync_send_plus(&r, sizeof(r));
2818}
2819
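/*
 * sc_len accounting: every queue contributes one struct pfsync_subheader
 * the moment it becomes non-empty, plus pfsync_qs[q].len per queued state.
 * If the next record would push the packet past the sync interface MTU,
 * the current contents are flushed first via pfsync_sendout().
 */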
2820void
2821pfsync_q_ins(struct pf_state *st, int q)
2822{
2823#ifdef __FreeBSD__
2824 struct pfsync_softc *sc = V_pfsyncif;
2825#else
2826 struct pfsync_softc *sc = pfsyncif;
2827#endif
2828 size_t nlen = pfsync_qs[q].len;
2829 int s;
2830
2831 PF_LOCK_ASSERT();
2832
2833#ifdef __FreeBSD__
2834 KASSERT(st->sync_state == PFSYNC_S_NONE,
2835 ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
2836#else
2837 KASSERT(st->sync_state == PFSYNC_S_NONE);
2838#endif
2839
2840#if 1 || defined(PFSYNC_DEBUG)
2841 if (sc->sc_len < PFSYNC_MINPKT)
2842#ifdef __FreeBSD__
2843 panic("pfsync pkt len is too low %zu", sc->sc_len);
2844#else
2845 panic("pfsync pkt len is too low %d", sc->sc_len);
2846#endif
2847#endif
2848 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2849 nlen += sizeof(struct pfsync_subheader);
2850
2851#ifdef __FreeBSD__
2852 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
2853#else
2854 if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2855#endif
2856 s = splnet();
2857 pfsync_sendout();
2858 splx(s);
2859
2860 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2861 }
2862
2863 sc->sc_len += nlen;
2864 TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
2865 st->sync_state = q;
2866}
2867
2868void
2869pfsync_q_del(struct pf_state *st)
2870{
2871#ifdef __FreeBSD__
2872 struct pfsync_softc *sc = V_pfsyncif;
2873#else
2874 struct pfsync_softc *sc = pfsyncif;
2875#endif
2876 int q = st->sync_state;
2877
2878#ifdef __FreeBSD__
2879 KASSERT(st->sync_state != PFSYNC_S_NONE,
2880 ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
2881#else
2882 KASSERT(st->sync_state != PFSYNC_S_NONE);
2883#endif
2884
2885 sc->sc_len -= pfsync_qs[q].len;
2886 TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
2887 st->sync_state = PFSYNC_S_NONE;
2888
2889 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2890 sc->sc_len -= sizeof(struct pfsync_subheader);
2891}
2892
2893#ifdef notyet
2894void
2895pfsync_update_tdb(struct tdb *t, int output)
2896{
2897#ifdef __FreeBSD__
2898 struct pfsync_softc *sc = V_pfsyncif;
2899#else
2900 struct pfsync_softc *sc = pfsyncif;
2901#endif
2902 size_t nlen = sizeof(struct pfsync_tdb);
2903 int s;
2904
2905 if (sc == NULL)
2906 return;
2907
2908 if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
2909 if (TAILQ_EMPTY(&sc->sc_tdb_q))
2910 nlen += sizeof(struct pfsync_subheader);
2911
2912 if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2913 s = splnet();
2914 PF_LOCK();
2915 pfsync_sendout();
2916 PF_UNLOCK();
2917 splx(s);
2918
2919 nlen = sizeof(struct pfsync_subheader) +
2920 sizeof(struct pfsync_tdb);
2921 }
2922
2923 sc->sc_len += nlen;
2924 TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
2925 SET(t->tdb_flags, TDBF_PFSYNC);
2926 t->tdb_updates = 0;
2927 } else {
2928 if (++t->tdb_updates >= sc->sc_maxupdates)
2929 schednetisr(NETISR_PFSYNC);
2930 }
2931
2932 if (output)
2933 SET(t->tdb_flags, TDBF_PFSYNC_RPL);
2934 else
2935 CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
2936}
2937
2938void
2939pfsync_delete_tdb(struct tdb *t)
2940{
2941#ifdef __FreeBSD__
2942 struct pfsync_softc *sc = V_pfsyncif;
2943#else
2944 struct pfsync_softc *sc = pfsyncif;
2945#endif
2946
2947 if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
2948 return;
2949
2950 sc->sc_len -= sizeof(struct pfsync_tdb);
2951 TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
2952 CLR(t->tdb_flags, TDBF_PFSYNC);
2953
2954 if (TAILQ_EMPTY(&sc->sc_tdb_q))
2955 sc->sc_len -= sizeof(struct pfsync_subheader);
2956}
2957
2958int
2959pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
2960{
2961 struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);
2962
2963 bzero(ut, sizeof(*ut));
2964 ut->spi = t->tdb_spi;
2965 bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
2966 /*
2967 * When a failover happens, the master's rpl is probably above
2968 * what we see here (we may be up to a second late), so
2969 * increase it a bit for outbound tdbs to manage most such
2970 * situations.
2971 *
2972 * For now, just add an offset that is likely to be larger
2973 * than the number of packets we can see in one second. The RFC
2974 * just says the next packet must have a higher seq value.
2975 *
2976 * XXX What is a good algorithm for this? We could use
2977 * a rate-determined increase, but to know it, we would have
2978 * to extend struct tdb.
2979	 * XXX ut->rpl can wrap over MAXINT, but if so the real tdb
2980 * will soon be replaced anyway. For now, just don't handle
2981 * this edge case.
2982 */
2983#define RPL_INCR 16384
2984 ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
2985 RPL_INCR : 0));
2986 ut->cur_bytes = htobe64(t->tdb_cur_bytes);
2987 ut->sproto = t->tdb_sproto;
2988
2989 return (sizeof(*ut));
2990}
2991#endif
2992
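/*
 * Bulk transfers: pfsync_bulk_start() answers a peer's update request by
 * walking the state list in MTU-sized chunks from pfsync_bulk_update(),
 * bracketed by PFSYNC_BUS_START/PFSYNC_BUS_END status messages.  The
 * requesting side arms sc_bulkfail_tmo and retries up to
 * PFSYNC_MAX_BULKTRIES times before giving up in pfsync_bulk_fail().
 */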
2993void
2994pfsync_bulk_start(void)
2995{
2996#ifdef __FreeBSD__
2997 struct pfsync_softc *sc = V_pfsyncif;
2998#else
2999 struct pfsync_softc *sc = pfsyncif;
3000#endif
3001
3002#ifdef __FreeBSD__
3003 if (V_pf_status.debug >= PF_DEBUG_MISC)
3004#else
3005 if (pf_status.debug >= PF_DEBUG_MISC)
3006#endif
3007 printf("pfsync: received bulk update request\n");
3008
3009#ifdef __FreeBSD__
3010 PF_LOCK();
3011 if (TAILQ_EMPTY(&V_state_list))
3012#else
3013 if (TAILQ_EMPTY(&state_list))
3014#endif
3015 pfsync_bulk_status(PFSYNC_BUS_END);
3016 else {
3017 sc->sc_ureq_received = time_uptime;
3018 if (sc->sc_bulk_next == NULL)
3019#ifdef __FreeBSD__
3020 sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
3021#else
3022 sc->sc_bulk_next = TAILQ_FIRST(&state_list);
3023#endif
3024 sc->sc_bulk_last = sc->sc_bulk_next;
3025
3026 pfsync_bulk_status(PFSYNC_BUS_START);
3027 callout_reset(&sc->sc_bulk_tmo, 1,
3028 pfsync_bulk_update, sc);
3029 }
3030#ifdef __FreeBSD__
3031 PF_UNLOCK();
3032#endif
3033}
3034
3035void
3036pfsync_bulk_update(void *arg)
3037{
3038 struct pfsync_softc *sc = arg;
3039 struct pf_state *st = sc->sc_bulk_next;
3040 int i = 0;
3041 int s;
3042
3043 PF_LOCK_ASSERT();
3044
3045 s = splsoftnet();
3046#ifdef __FreeBSD__
3047 CURVNET_SET(sc->sc_ifp->if_vnet);
3048#endif
3049 for (;;) {
3050 if (st->sync_state == PFSYNC_S_NONE &&
3051 st->timeout < PFTM_MAX &&
3052 st->pfsync_time <= sc->sc_ureq_received) {
3053 pfsync_update_state_req(st);
3054 i++;
3055 }
3056
3057 st = TAILQ_NEXT(st, entry_list);
3058 if (st == NULL)
3059#ifdef __FreeBSD__
3060 st = TAILQ_FIRST(&V_state_list);
3061#else
3062 st = TAILQ_FIRST(&state_list);
3063#endif
3064
3065 if (st == sc->sc_bulk_last) {
3066 /* we're done */
3067 sc->sc_bulk_next = NULL;
3068 sc->sc_bulk_last = NULL;
3069 pfsync_bulk_status(PFSYNC_BUS_END);
3070 break;
3071 }
3072
3073#ifdef __FreeBSD__
3074 if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
3075#else
3076 if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
3077#endif
3078 sizeof(struct pfsync_state)) {
3079 /* we've filled a packet */
3080 sc->sc_bulk_next = st;
3081#ifdef __FreeBSD__
3082 callout_reset(&sc->sc_bulk_tmo, 1,
3083 pfsync_bulk_update, sc);
3084#else
3085 timeout_add(&sc->sc_bulk_tmo, 1);
3086#endif
3087 break;
3088 }
3089 }
3090
3091#ifdef __FreeBSD__
3092 CURVNET_RESTORE();
3093#endif
3094 splx(s);
3095}
3096
3097void
3098pfsync_bulk_status(u_int8_t status)
3099{
3100 struct {
3101 struct pfsync_subheader subh;
3102 struct pfsync_bus bus;
3103 } __packed r;
3104
3105#ifdef __FreeBSD__
3106 struct pfsync_softc *sc = V_pfsyncif;
3107#else
3108 struct pfsync_softc *sc = pfsyncif;
3109#endif
3110
3111 PF_LOCK_ASSERT();
3112
3113 bzero(&r, sizeof(r));
3114
3115 r.subh.action = PFSYNC_ACT_BUS;
3116 r.subh.count = htons(1);
3117
3118#ifdef __FreeBSD__
3119 r.bus.creatorid = V_pf_status.hostid;
3120#else
3121 r.bus.creatorid = pf_status.hostid;
3122#endif
3123 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
3124 r.bus.status = status;
3125
3126 pfsync_send_plus(&r, sizeof(r));
3127}
3128
3129void
3130pfsync_bulk_fail(void *arg)
3131{
3132 struct pfsync_softc *sc = arg;
3133
3134#ifdef __FreeBSD__
3135 CURVNET_SET(sc->sc_ifp->if_vnet);
3136#endif
3137
3138 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
3139 /* Try again */
3140#ifdef __FreeBSD__
3141 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
3142 pfsync_bulk_fail, V_pfsyncif);
3143#else
3144 timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
3145#endif
3146 PF_LOCK();
3147 pfsync_request_update(0, 0);
3148 PF_UNLOCK();
3149 } else {
3150		/* Pretend the transfer was ok. */
3151 sc->sc_ureq_sent = 0;
3152 sc->sc_bulk_tries = 0;
3153#ifdef __FreeBSD__
3154 if (!sc->pfsync_sync_ok && carp_demote_adj_p)
3155 (*carp_demote_adj_p)(-V_pfsync_carp_adj,
3156 "pfsync bulk fail");
3157 sc->pfsync_sync_ok = 1;
3158#else
3159#if NCARP > 0
3160 if (!pfsync_sync_ok)
3161 carp_group_demote_adj(&sc->sc_if, -1);
3162#endif
3163 pfsync_sync_ok = 1;
3164#endif
3165#ifdef __FreeBSD__
3166 if (V_pf_status.debug >= PF_DEBUG_MISC)
3167#else
3168 if (pf_status.debug >= PF_DEBUG_MISC)
3169#endif
3170 printf("pfsync: failed to receive bulk update\n");
3171 }
3172
3173#ifdef __FreeBSD__
3174 CURVNET_RESTORE();
3175#endif
3176}
3177
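/*
 * Append a preformatted ("plus") region, such as the clear and bulk-status
 * messages built above, and transmit immediately.
 */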
3178void
3179pfsync_send_plus(void *plus, size_t pluslen)
3180{
3181#ifdef __FreeBSD__
3182 struct pfsync_softc *sc = V_pfsyncif;
3183#else
3184 struct pfsync_softc *sc = pfsyncif;
3185#endif
3186 int s;
3187
3188 PF_LOCK_ASSERT();
3189
3190#ifdef __FreeBSD__
3191 if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
3192#else
3193 if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
3194#endif
3195 s = splnet();
3196 pfsync_sendout();
3197 splx(s);
3198 }
3199
3200 sc->sc_plus = plus;
3201 sc->sc_len += (sc->sc_pluslen = pluslen);
3202
3203 s = splnet();
3204 pfsync_sendout();
3205 splx(s);
3206}
3207
3208int
3209pfsync_up(void)
3210{
3211#ifdef __FreeBSD__
3212 struct pfsync_softc *sc = V_pfsyncif;
3213#else
3214 struct pfsync_softc *sc = pfsyncif;
3215#endif
3216
3217#ifdef __FreeBSD__
3218 if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
3219#else
3220 if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
3221#endif
3222 return (0);
3223
3224 return (1);
3225}
3226
3227int
3228pfsync_state_in_use(struct pf_state *st)
3229{
3230#ifdef __FreeBSD__
3231 struct pfsync_softc *sc = V_pfsyncif;
3232#else
3233 struct pfsync_softc *sc = pfsyncif;
3234#endif
3235
3236 if (sc == NULL)
3237 return (0);
3238
3239 if (st->sync_state != PFSYNC_S_NONE ||
3240 st == sc->sc_bulk_next ||
3241 st == sc->sc_bulk_last)
3242 return (1);
3243
3244 return (0);
3245}
3246
3247u_int pfsync_ints;
3248u_int pfsync_tmos;
3249
3250void
3251pfsync_timeout(void *arg)
3252{
3253#if defined(__FreeBSD__) && defined(VIMAGE)
3254 struct pfsync_softc *sc = arg;
3255#endif
3256 int s;
3257
3258#ifdef __FreeBSD__
3259 CURVNET_SET(sc->sc_ifp->if_vnet);
3260#endif
3261
3262 pfsync_tmos++;
3263
3264 s = splnet();
3265#ifdef __FreeBSD__
3266 PF_LOCK();
3267#endif
3268 pfsync_sendout();
3269#ifdef __FreeBSD__
3270 PF_UNLOCK();
3271#endif
3272 splx(s);
3273
3274#ifdef __FreeBSD__
3275 CURVNET_RESTORE();
3276#endif
3277}
3278
3279/* this is a softnet/netisr handler */
3280void
3281#ifdef __FreeBSD__
3282pfsyncintr(void *arg)
3283{
3284 struct pfsync_softc *sc = arg;
3285 struct mbuf *m, *n;
3286
3287 CURVNET_SET(sc->sc_ifp->if_vnet);
3288 pfsync_ints++;
3289
3290 IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);
3291
3292 for (; m != NULL; m = n) {
3293
3294 n = m->m_nextpkt;
3295 m->m_nextpkt = NULL;
3296 if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
3297 == 0)
3298 V_pfsyncstats.pfsyncs_opackets++;
3299 else
3300 V_pfsyncstats.pfsyncs_oerrors++;
3301 }
3302 CURVNET_RESTORE();
3303}
3304#else
3305pfsyncintr(void)
3306{
3307 int s;
3308
3309 pfsync_ints++;
3310
3311 s = splnet();
3312 pfsync_sendout();
3313 splx(s);
3314}
3315#endif
3316
3317int
3318pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
3319 size_t newlen)
3320{
3321
3322#ifdef notyet
3323 /* All sysctl names at this level are terminal. */
3324 if (namelen != 1)
3325 return (ENOTDIR);
3326
3327 switch (name[0]) {
3328 case PFSYNCCTL_STATS:
3329 if (newp != NULL)
3330 return (EPERM);
3331 return (sysctl_struct(oldp, oldlenp, newp, newlen,
3332 &V_pfsyncstats, sizeof(V_pfsyncstats)));
3333 }
3334#endif
3335 return (ENOPROTOOPT);
3336}
3337
3338#ifdef __FreeBSD__
3339void
3340pfsync_ifdetach(void *arg, struct ifnet *ifp)
3341{
3342 struct pfsync_softc *sc = (struct pfsync_softc *)arg;
3343 struct ip_moptions *imo;
3344
3345 if (sc == NULL || sc->sc_sync_if != ifp)
3346 return; /* not for us; unlocked read */
3347
3348 CURVNET_SET(sc->sc_ifp->if_vnet);
3349
3350 PF_LOCK();
3351
3352 /* Deal with a member interface going away from under us. */
3353 sc->sc_sync_if = NULL;
3354 imo = &sc->sc_imo;
3355 if (imo->imo_num_memberships > 0) {
3356 KASSERT(imo->imo_num_memberships == 1,
3357 ("%s: imo_num_memberships != 1", __func__));
3358 /*
3359 * Our event handler is always called after protocol
3360 * domains have been detached from the underlying ifnet.
3361 * Do not call in_delmulti(); we held a single reference
3362 * which the protocol domain has purged in in_purgemaddrs().
3363 */
3364 PF_UNLOCK();
3365 imo->imo_membership[--imo->imo_num_memberships] = NULL;
3366 PF_LOCK();
3367 imo->imo_multicast_ifp = NULL;
3368 }
3369
3370 PF_UNLOCK();
3371
3372 CURVNET_RESTORE();
3373}
3374
3375static int
3376vnet_pfsync_init(const void *unused)
3377{
3378 int error = 0;
3379
3380 pfsyncattach(0);
3381
3382 error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
3383 SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie);
3384 if (error)
3385 panic("%s: swi_add %d", __func__, error);
3386
3387 PF_LOCK();
3388 pfsync_state_import_ptr = pfsync_state_import;
3389 pfsync_up_ptr = pfsync_up;
3390 pfsync_insert_state_ptr = pfsync_insert_state;
3391 pfsync_update_state_ptr = pfsync_update_state;
3392 pfsync_delete_state_ptr = pfsync_delete_state;
3393 pfsync_clear_states_ptr = pfsync_clear_states;
3394 pfsync_state_in_use_ptr = pfsync_state_in_use;
3395 pfsync_defer_ptr = pfsync_defer;
3396 PF_UNLOCK();
3397
3398 return (0);
3399}
3400
3401static int
3402vnet_pfsync_uninit(const void *unused)
3403{
3404
3405 swi_remove(pfsync_swi.pfsync_swi_cookie);
3406
3407 PF_LOCK();
3408 pfsync_state_import_ptr = NULL;
3409 pfsync_up_ptr = NULL;
3410 pfsync_insert_state_ptr = NULL;
3411 pfsync_update_state_ptr = NULL;
3412 pfsync_delete_state_ptr = NULL;
3413 pfsync_clear_states_ptr = NULL;
3414 pfsync_state_in_use_ptr = NULL;
3415 pfsync_defer_ptr = NULL;
3416 PF_UNLOCK();
3417
3418 if_clone_detach(&pfsync_cloner);
3419
3420 return (0);
3421}
3422
3423/* Define startup order. */
3424#define PFSYNC_SYSINIT_ORDER SI_SUB_PROTO_IF
3425#define PFSYNC_MODEVENT_ORDER (SI_ORDER_FIRST) /* On boot slot in here. */
3426#define PFSYNC_VNET_ORDER (PFSYNC_MODEVENT_ORDER + 2) /* Later still. */
3427
3428/*
3429 * Starting up.
3430 * VNET_SYSINIT is called for each existing vnet and each new vnet.
3431 */
3432VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
3433 vnet_pfsync_init, NULL);
3434
3435/*
3436 * Closing up shop. These are done in REVERSE ORDER.
3437 * Not called on reboot.
3438 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
3439 */
3440VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
3441 vnet_pfsync_uninit, NULL);
3442static int
3443pfsync_modevent(module_t mod, int type, void *data)
3444{
3445 int error = 0;
3446
3447 switch (type) {
3448 case MOD_LOAD:
3449#ifndef __FreeBSD__
3450 pfsyncattach(0);
3451#endif
3452 break;
3453 case MOD_UNLOAD:
3454#ifndef __FreeBSD__
3455 if_clone_detach(&pfsync_cloner);
3456#endif
3457 break;
3458 default:
3459 error = EINVAL;
3460 break;
3461 }
3462
3463 return error;
3464}
3465
3466static moduledata_t pfsync_mod = {
3467 "pfsync",
3468 pfsync_modevent,
3469 0
3470};
3471
3472#define PFSYNC_MODVER 1
3473
3474DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3475MODULE_VERSION(pfsync, PFSYNC_MODVER);
3476MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
3477#endif /* __FreeBSD__ */
54
55#ifdef DEV_PFSYNC
56#define NPFSYNC DEV_PFSYNC
57#else
58#define NPFSYNC 0
59#endif
60#endif /* __FreeBSD__ */
61
62#include <sys/param.h>
63#include <sys/kernel.h>
64#ifdef __FreeBSD__
65#include <sys/bus.h>
66#include <sys/interrupt.h>
67#include <sys/priv.h>
68#endif
69#include <sys/proc.h>
70#include <sys/systm.h>
71#include <sys/time.h>
72#include <sys/mbuf.h>
73#include <sys/socket.h>
74#ifdef __FreeBSD__
75#include <sys/endian.h>
76#include <sys/malloc.h>
77#include <sys/module.h>
78#include <sys/sockio.h>
79#include <sys/taskqueue.h>
80#include <sys/lock.h>
81#include <sys/mutex.h>
82#else
83#include <sys/ioctl.h>
84#include <sys/timeout.h>
85#endif
86#include <sys/sysctl.h>
87#ifndef __FreeBSD__
88#include <sys/pool.h>
89#endif
90
91#include <net/if.h>
92#ifdef __FreeBSD__
93#include <net/if_clone.h>
94#endif
95#include <net/if_types.h>
96#include <net/route.h>
97#include <net/bpf.h>
98#include <net/netisr.h>
99#ifdef __FreeBSD__
100#include <net/vnet.h>
101#endif
102
103#include <netinet/in.h>
104#include <netinet/if_ether.h>
105#include <netinet/tcp.h>
106#include <netinet/tcp_seq.h>
107
108#ifdef INET
109#include <netinet/in_systm.h>
110#include <netinet/in_var.h>
111#include <netinet/ip.h>
112#include <netinet/ip_var.h>
113#endif
114
115#ifdef INET6
116#include <netinet6/nd6.h>
117#endif /* INET6 */
118
119#ifdef __FreeBSD__
120#include <netinet/ip_carp.h>
121#else
122#include "carp.h"
123#if NCARP > 0
124#include <netinet/ip_carp.h>
125#endif
126#endif
127
128#include <net/pfvar.h>
129#include <net/if_pfsync.h>
130
131#ifndef __FreeBSD__
132#include "bpfilter.h"
133#include "pfsync.h"
134#endif
135
136#define PFSYNC_MINPKT ( \
137 sizeof(struct ip) + \
138 sizeof(struct pfsync_header) + \
139 sizeof(struct pfsync_subheader) + \
140 sizeof(struct pfsync_eof))
141
142struct pfsync_pkt {
143 struct ip *ip;
144 struct in_addr src;
145 u_int8_t flags;
146};
147
148int pfsync_input_hmac(struct mbuf *, int);
149
150int pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
151 struct pfsync_state_peer *);
152
153int pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
154int pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
155int pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
156int pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
157int pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
158int pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
159int pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
160int pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
161int pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
162int pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
163int pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
164
165int pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);
166
167int (*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
168 pfsync_in_clr, /* PFSYNC_ACT_CLR */
169 pfsync_in_ins, /* PFSYNC_ACT_INS */
170 pfsync_in_iack, /* PFSYNC_ACT_INS_ACK */
171 pfsync_in_upd, /* PFSYNC_ACT_UPD */
172 pfsync_in_upd_c, /* PFSYNC_ACT_UPD_C */
173 pfsync_in_ureq, /* PFSYNC_ACT_UPD_REQ */
174 pfsync_in_del, /* PFSYNC_ACT_DEL */
175 pfsync_in_del_c, /* PFSYNC_ACT_DEL_C */
176 pfsync_in_error, /* PFSYNC_ACT_INS_F */
177 pfsync_in_error, /* PFSYNC_ACT_DEL_F */
178 pfsync_in_bus, /* PFSYNC_ACT_BUS */
179 pfsync_in_tdb, /* PFSYNC_ACT_TDB */
180 pfsync_in_eof /* PFSYNC_ACT_EOF */
181};
182
183struct pfsync_q {
184 int (*write)(struct pf_state *, struct mbuf *, int);
185 size_t len;
186 u_int8_t action;
187};
188
189/* we have one of these for every PFSYNC_S_ */
190int pfsync_out_state(struct pf_state *, struct mbuf *, int);
191int pfsync_out_iack(struct pf_state *, struct mbuf *, int);
192int pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
193int pfsync_out_del(struct pf_state *, struct mbuf *, int);
194
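/*
 * Table of per-queue writers, indexed by the PFSYNC_S_* value kept in
 * st->sync_state; each entry gives the record size used for sc_len
 * accounting and the subheader action emitted by pfsync_sendout().
 */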
195struct pfsync_q pfsync_qs[] = {
196 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_INS },
197 { pfsync_out_iack, sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
198 { pfsync_out_state, sizeof(struct pfsync_state), PFSYNC_ACT_UPD },
199 { pfsync_out_upd_c, sizeof(struct pfsync_upd_c), PFSYNC_ACT_UPD_C },
200 { pfsync_out_del, sizeof(struct pfsync_del_c), PFSYNC_ACT_DEL_C }
201};
202
203void pfsync_q_ins(struct pf_state *, int);
204void pfsync_q_del(struct pf_state *);
205
206struct pfsync_upd_req_item {
207 TAILQ_ENTRY(pfsync_upd_req_item) ur_entry;
208 struct pfsync_upd_req ur_msg;
209};
210TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);
211
212struct pfsync_deferral {
213 TAILQ_ENTRY(pfsync_deferral) pd_entry;
214 struct pf_state *pd_st;
215 struct mbuf *pd_m;
216#ifdef __FreeBSD__
217 struct callout pd_tmo;
218#else
219 struct timeout pd_tmo;
220#endif
221};
222TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);
223
224#define PFSYNC_PLSIZE MAX(sizeof(struct pfsync_upd_req_item), \
225 sizeof(struct pfsync_deferral))
226
227#ifdef notyet
228int pfsync_out_tdb(struct tdb *, struct mbuf *, int);
229#endif
230
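/*
 * Per-instance softc: the cloned pfsync interface, the physical sync
 * interface and its multicast membership, the per-PFSYNC_S_* state queues
 * with the running packet length (sc_len), the deferral list, the "plus"
 * region and the bulk-transfer cursors and timeouts.
 */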
231struct pfsync_softc {
232#ifdef __FreeBSD__
233 struct ifnet *sc_ifp;
234#else
235 struct ifnet sc_if;
236#endif
237 struct ifnet *sc_sync_if;
238
239#ifdef __FreeBSD__
240 uma_zone_t sc_pool;
241#else
242 struct pool sc_pool;
243#endif
244
245 struct ip_moptions sc_imo;
246
247 struct in_addr sc_sync_peer;
248 u_int8_t sc_maxupdates;
249#ifdef __FreeBSD__
250 int pfsync_sync_ok;
251#endif
252
253 struct ip sc_template;
254
255 struct pf_state_queue sc_qs[PFSYNC_S_COUNT];
256 size_t sc_len;
257
258 struct pfsync_upd_reqs sc_upd_req_list;
259
260 struct pfsync_deferrals sc_deferrals;
261 u_int sc_deferred;
262
263 void *sc_plus;
264 size_t sc_pluslen;
265
266 u_int32_t sc_ureq_sent;
267 int sc_bulk_tries;
268#ifdef __FreeBSD__
269 struct callout sc_bulkfail_tmo;
270#else
271 struct timeout sc_bulkfail_tmo;
272#endif
273
274 u_int32_t sc_ureq_received;
275 struct pf_state *sc_bulk_next;
276 struct pf_state *sc_bulk_last;
277#ifdef __FreeBSD__
278 struct callout sc_bulk_tmo;
279#else
280 struct timeout sc_bulk_tmo;
281#endif
282
283 TAILQ_HEAD(, tdb) sc_tdb_q;
284
285#ifdef __FreeBSD__
286 struct callout sc_tmo;
287#else
288 struct timeout sc_tmo;
289#endif
290#ifdef __FreeBSD__
291 eventhandler_tag sc_detachtag;
292#endif
293
294};
295
296#ifdef __FreeBSD__
297static VNET_DEFINE(struct pfsync_softc *, pfsyncif) = NULL;
298#define V_pfsyncif VNET(pfsyncif)
299
300static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
301#define V_pfsyncstats VNET(pfsyncstats)
302static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
303#define V_pfsync_carp_adj VNET(pfsync_carp_adj)
304
305SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
306SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
307 &VNET_NAME(pfsyncstats), pfsyncstats,
308 "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
309SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
310 &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
311#else
312struct pfsync_softc *pfsyncif = NULL;
313struct pfsyncstats pfsyncstats;
314#define V_pfsyncstats pfsyncstats
315#endif
316
317#ifdef __FreeBSD__
318static void pfsyncintr(void *);
319struct pfsync_swi {
320 void * pfsync_swi_cookie;
321};
322static struct pfsync_swi pfsync_swi;
323#define schednetisr(p) swi_sched(pfsync_swi.pfsync_swi_cookie, 0)
324#define NETISR_PFSYNC
325#endif
326
327void pfsyncattach(int);
328#ifdef __FreeBSD__
329int pfsync_clone_create(struct if_clone *, int, caddr_t);
330void pfsync_clone_destroy(struct ifnet *);
331#else
332int pfsync_clone_create(struct if_clone *, int);
333int pfsync_clone_destroy(struct ifnet *);
334#endif
335int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
336 struct pf_state_peer *);
337void pfsync_update_net_tdb(struct pfsync_tdb *);
338int pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
339#ifdef __FreeBSD__
340 struct route *);
341#else
342 struct rtentry *);
343#endif
344int pfsyncioctl(struct ifnet *, u_long, caddr_t);
345void pfsyncstart(struct ifnet *);
346
347struct mbuf *pfsync_if_dequeue(struct ifnet *);
348struct mbuf *pfsync_get_mbuf(struct pfsync_softc *);
349
350void pfsync_deferred(struct pf_state *, int);
351void pfsync_undefer(struct pfsync_deferral *, int);
352void pfsync_defer_tmo(void *);
353
354void pfsync_request_update(u_int32_t, u_int64_t);
355void pfsync_update_state_req(struct pf_state *);
356
357void pfsync_drop(struct pfsync_softc *);
358void pfsync_sendout(void);
359void pfsync_send_plus(void *, size_t);
360int pfsync_tdb_sendout(struct pfsync_softc *);
361int pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
362void pfsync_timeout(void *);
363void pfsync_tdb_timeout(void *);
364void pfsync_send_bus(struct pfsync_softc *, u_int8_t);
365
366void pfsync_bulk_start(void);
367void pfsync_bulk_status(u_int8_t);
368void pfsync_bulk_update(void *);
369void pfsync_bulk_fail(void *);
370
371#ifdef __FreeBSD__
372void pfsync_ifdetach(void *, struct ifnet *);
373
374/* XXX: ugly */
375#define betoh64 (unsigned long long)be64toh
376#define timeout_del callout_stop
377#endif
378
379#define PFSYNC_MAX_BULKTRIES 12
380#ifndef __FreeBSD__
381int pfsync_sync_ok;
382#endif
383
384#ifdef __FreeBSD__
385IFC_SIMPLE_DECLARE(pfsync, 1);
386#else
387struct if_clone pfsync_cloner =
388 IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
389#endif
390
391void
392pfsyncattach(int npfsync)
393{
394 if_clone_attach(&pfsync_cloner);
395}
396int
397#ifdef __FreeBSD__
398pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
399#else
400pfsync_clone_create(struct if_clone *ifc, int unit)
401#endif
402{
403 struct pfsync_softc *sc;
404 struct ifnet *ifp;
405 int q;
406
407 if (unit != 0)
408 return (EINVAL);
409
410#ifndef __FreeBSD__
411 pfsync_sync_ok = 1;
412#endif
413
414 sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
415 if (sc == NULL)
416 return (ENOMEM);
417
418 for (q = 0; q < PFSYNC_S_COUNT; q++)
419 TAILQ_INIT(&sc->sc_qs[q]);
420
421#ifdef __FreeBSD__
422 sc->pfsync_sync_ok = 1;
423 sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE,
424 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
425 if (sc->sc_pool == NULL) {
426 free(sc, M_DEVBUF);
427 return (ENOMEM);
428 }
429#else
430 pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
431#endif
432 TAILQ_INIT(&sc->sc_upd_req_list);
433 TAILQ_INIT(&sc->sc_deferrals);
434 sc->sc_deferred = 0;
435
436 TAILQ_INIT(&sc->sc_tdb_q);
437
438 sc->sc_len = PFSYNC_MINPKT;
439 sc->sc_maxupdates = 128;
440
441#ifdef __FreeBSD__
442 sc->sc_imo.imo_membership = (struct in_multi **)malloc(
443 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF,
444 M_NOWAIT | M_ZERO);
445 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
446 sc->sc_imo.imo_multicast_vif = -1;
447#else
448 sc->sc_imo.imo_membership = (struct in_multi **)malloc(
449 (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
450 M_WAITOK | M_ZERO);
451 sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
452#endif
453
454#ifdef __FreeBSD__
455 ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
456 if (ifp == NULL) {
457 free(sc->sc_imo.imo_membership, M_DEVBUF);
458 uma_zdestroy(sc->sc_pool);
459 free(sc, M_DEVBUF);
460 return (ENOSPC);
461 }
462 if_initname(ifp, ifc->ifc_name, unit);
463
464 sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event,
465#ifdef __FreeBSD__
466 pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY);
467#else
468 pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY);
469#endif
470 if (sc->sc_detachtag == NULL) {
471 if_free(ifp);
472 free(sc->sc_imo.imo_membership, M_DEVBUF);
473 uma_zdestroy(sc->sc_pool);
474 free(sc, M_DEVBUF);
475 return (ENOSPC);
476 }
477#else
478 ifp = &sc->sc_if;
479 snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
480#endif
481 ifp->if_softc = sc;
482 ifp->if_ioctl = pfsyncioctl;
483 ifp->if_output = pfsyncoutput;
484 ifp->if_start = pfsyncstart;
485 ifp->if_type = IFT_PFSYNC;
486 ifp->if_snd.ifq_maxlen = ifqmaxlen;
487 ifp->if_hdrlen = sizeof(struct pfsync_header);
488 ifp->if_mtu = 1500; /* XXX */
489#ifdef __FreeBSD__
490 callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
491 callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
492 callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
493#else
494 ifp->if_hardmtu = MCLBYTES; /* XXX */
495 timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
496 timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
497 timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
498#endif
499
500 if_attach(ifp);
501#ifndef __FreeBSD__
502 if_alloc_sadl(ifp);
503
504#if NCARP > 0
505 if_addgroup(ifp, "carp");
506#endif
507#endif
508
509#if NBPFILTER > 0
510#ifdef __FreeBSD__
511 bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
512#else
513 bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
514#endif
515#endif
516
517#ifdef __FreeBSD__
518 V_pfsyncif = sc;
519#else
520 pfsyncif = sc;
521#endif
522
523 return (0);
524}
525
526#ifdef __FreeBSD__
527void
528#else
529int
530#endif
531pfsync_clone_destroy(struct ifnet *ifp)
532{
533 struct pfsync_softc *sc = ifp->if_softc;
534
535#ifdef __FreeBSD__
536 EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag);
537 PF_LOCK();
538#endif
539 timeout_del(&sc->sc_bulk_tmo);
540 timeout_del(&sc->sc_tmo);
541#ifdef __FreeBSD__
542 PF_UNLOCK();
543 if (!sc->pfsync_sync_ok && carp_demote_adj_p)
544 (*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
545#else
546#if NCARP > 0
547 if (!pfsync_sync_ok)
548 carp_group_demote_adj(&sc->sc_if, -1);
549#endif
550#endif
551#if NBPFILTER > 0
552 bpfdetach(ifp);
553#endif
554 if_detach(ifp);
555
556 pfsync_drop(sc);
557
558 while (sc->sc_deferred > 0)
559 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
560
561#ifdef __FreeBSD__
562 UMA_DESTROY(sc->sc_pool);
563#else
564 pool_destroy(&sc->sc_pool);
565#endif
566#ifdef __FreeBSD__
567 if_free(ifp);
568 free(sc->sc_imo.imo_membership, M_DEVBUF);
569#else
570 free(sc->sc_imo.imo_membership, M_IPMOPTS);
571#endif
572 free(sc, M_DEVBUF);
573
574#ifdef __FreeBSD__
575 V_pfsyncif = NULL;
576#else
577 pfsyncif = NULL;
578#endif
579
580#ifndef __FreeBSD__
581 return (0);
582#endif
583}
584
585struct mbuf *
586pfsync_if_dequeue(struct ifnet *ifp)
587{
588 struct mbuf *m;
589#ifndef __FreeBSD__
590 int s;
591#endif
592
593#ifdef __FreeBSD__
594 IF_LOCK(&ifp->if_snd);
595 _IF_DROP(&ifp->if_snd);
596 _IF_DEQUEUE(&ifp->if_snd, m);
597 IF_UNLOCK(&ifp->if_snd);
598#else
599 s = splnet();
600 IF_DEQUEUE(&ifp->if_snd, m);
601 splx(s);
602#endif
603
604 return (m);
605}
606
607/*
608 * Start output on the pfsync interface.
609 */
610void
611pfsyncstart(struct ifnet *ifp)
612{
613 struct mbuf *m;
614
615 while ((m = pfsync_if_dequeue(ifp)) != NULL) {
616#ifndef __FreeBSD__
617 IF_DROP(&ifp->if_snd);
618#endif
619 m_freem(m);
620 }
621}
622
623int
624pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
625 struct pf_state_peer *d)
626{
627 if (s->scrub.scrub_flag && d->scrub == NULL) {
628#ifdef __FreeBSD__
629 d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
630#else
631 d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
632#endif
633 if (d->scrub == NULL)
634 return (ENOMEM);
635 }
636
637 return (0);
638}
639
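/*
 * pfsync_state_export() (provided here only for the non-__FreeBSD__ build)
 * flattens a kernel pf_state into the wire format: addresses and ports are
 * copied from both state keys, creation/expiry times are converted to
 * relative seconds, and ids and counters are put in network byte order.
 */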
640#ifndef __FreeBSD__
641void
642pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
643{
644 bzero(sp, sizeof(struct pfsync_state));
645
646 /* copy from state key */
647 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
648 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
649 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
650 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
651 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
652 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
653 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
654 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
655 sp->proto = st->key[PF_SK_WIRE]->proto;
656 sp->af = st->key[PF_SK_WIRE]->af;
657
658 /* copy from state */
659 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
660 bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
661 sp->creation = htonl(time_second - st->creation);
662 sp->expire = pf_state_expires(st);
663 if (sp->expire <= time_second)
664 sp->expire = htonl(0);
665 else
666 sp->expire = htonl(sp->expire - time_second);
667
668 sp->direction = st->direction;
669 sp->log = st->log;
670 sp->timeout = st->timeout;
671 sp->state_flags = st->state_flags;
672 if (st->src_node)
673 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
674 if (st->nat_src_node)
675 sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
676
677 bcopy(&st->id, &sp->id, sizeof(sp->id));
678 sp->creatorid = st->creatorid;
679 pf_state_peer_hton(&st->src, &sp->src);
680 pf_state_peer_hton(&st->dst, &sp->dst);
681
682 if (st->rule.ptr == NULL)
683 sp->rule = htonl(-1);
684 else
685 sp->rule = htonl(st->rule.ptr->nr);
686 if (st->anchor.ptr == NULL)
687 sp->anchor = htonl(-1);
688 else
689 sp->anchor = htonl(st->anchor.ptr->nr);
690 if (st->nat_rule.ptr == NULL)
691 sp->nat_rule = htonl(-1);
692 else
693 sp->nat_rule = htonl(st->nat_rule.ptr->nr);
694
695 pf_state_counter_hton(st->packets[0], sp->packets[0]);
696 pf_state_counter_hton(st->packets[1], sp->packets[1]);
697 pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
698 pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
699
700}
701#endif
702
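/*
 * pfsync_state_import() rebuilds a local state from the wire format: it
 * resolves the interface and, when the ruleset checksums match or the
 * state comes from the ioctl, the originating rule; allocates the state,
 * its key(s) and scrub memory; restores timers from the advertised
 * remaining lifetime; and finally inserts the state.  PFSYNC_SI_IOCTL
 * also selects blocking pool allocations.
 */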
703int
704pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
705{
706 struct pf_state *st = NULL;
707 struct pf_state_key *skw = NULL, *sks = NULL;
708 struct pf_rule *r = NULL;
709 struct pfi_kif *kif;
710 int pool_flags;
711 int error;
712
713 PF_LOCK_ASSERT();
714
715#ifdef __FreeBSD__
716 if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
717#else
718 if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
719#endif
720 printf("pfsync_state_import: invalid creator id:"
721 " %08x\n", ntohl(sp->creatorid));
722 return (EINVAL);
723 }
724
725 if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
726#ifdef __FreeBSD__
727 if (V_pf_status.debug >= PF_DEBUG_MISC)
728#else
729 if (pf_status.debug >= PF_DEBUG_MISC)
730#endif
731 printf("pfsync_state_import: "
732 "unknown interface: %s\n", sp->ifname);
733 if (flags & PFSYNC_SI_IOCTL)
734 return (EINVAL);
735 return (0); /* skip this state */
736 }
737
738 /*
739 * If the ruleset checksums match or the state is coming from the ioctl,
740 * it's safe to associate the state with the rule of that number.
741 */
742 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
743 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
744 pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
745 r = pf_main_ruleset.rules[
746 PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
747 else
748#ifdef __FreeBSD__
749 r = &V_pf_default_rule;
750#else
751 r = &pf_default_rule;
752#endif
753
754 if ((r->max_states && r->states_cur >= r->max_states))
755 goto cleanup;
756
757#ifdef __FreeBSD__
758 if (flags & PFSYNC_SI_IOCTL)
759 pool_flags = PR_WAITOK | PR_ZERO;
760 else
761 pool_flags = PR_NOWAIT | PR_ZERO;
762
763 if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)
764 goto cleanup;
765#else
766 if (flags & PFSYNC_SI_IOCTL)
767 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
768 else
769 pool_flags = PR_LIMITFAIL | PR_ZERO;
770
771 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
772 goto cleanup;
773#endif
774
775 if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
776 goto cleanup;
777
778 if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
779 &sp->key[PF_SK_STACK].addr[0], sp->af) ||
780 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
781 &sp->key[PF_SK_STACK].addr[1], sp->af) ||
782 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
783 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
784 if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
785 goto cleanup;
786 } else
787 sks = skw;
788
789 /* allocate memory for scrub info */
790 if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
791 pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
792 goto cleanup;
793
794 /* copy to state key(s) */
795 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
796 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
797 skw->port[0] = sp->key[PF_SK_WIRE].port[0];
798 skw->port[1] = sp->key[PF_SK_WIRE].port[1];
799 skw->proto = sp->proto;
800 skw->af = sp->af;
801 if (sks != skw) {
802 sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
803 sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
804 sks->port[0] = sp->key[PF_SK_STACK].port[0];
805 sks->port[1] = sp->key[PF_SK_STACK].port[1];
806 sks->proto = sp->proto;
807 sks->af = sp->af;
808 }
809
810 /* copy to state */
811 bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
812 st->creation = time_second - ntohl(sp->creation);
813 st->expire = time_second;
814 if (sp->expire) {
815 /* XXX No adaptive scaling. */
816 st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
817 }
818
820 st->direction = sp->direction;
821 st->log = sp->log;
822 st->timeout = sp->timeout;
823 st->state_flags = sp->state_flags;
824
825 bcopy(sp->id, &st->id, sizeof(st->id));
826 st->creatorid = sp->creatorid;
827 pf_state_peer_ntoh(&sp->src, &st->src);
828 pf_state_peer_ntoh(&sp->dst, &st->dst);
829
830 st->rule.ptr = r;
831 st->nat_rule.ptr = NULL;
832 st->anchor.ptr = NULL;
833 st->rt_kif = NULL;
834
835 st->pfsync_time = time_second;
836 st->sync_state = PFSYNC_S_NONE;
837
838 /* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
839 r->states_cur++;
840 r->states_tot++;
841
842 if (!ISSET(flags, PFSYNC_SI_IOCTL))
843 SET(st->state_flags, PFSTATE_NOSYNC);
844
845 if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
846 /* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
847 r->states_cur--;
848 goto cleanup_state;
849 }
850
851 if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
852 CLR(st->state_flags, PFSTATE_NOSYNC);
853 if (ISSET(st->state_flags, PFSTATE_ACK)) {
854 pfsync_q_ins(st, PFSYNC_S_IACK);
855#ifdef __FreeBSD__
856 pfsync_sendout();
857#else
858 schednetisr(NETISR_PFSYNC);
859#endif
860 }
861 }
862 CLR(st->state_flags, PFSTATE_ACK);
863
864 return (0);
865
866cleanup:
867 error = ENOMEM;
868 if (skw == sks)
869 sks = NULL;
870#ifdef __FreeBSD__
871 if (skw != NULL)
872 pool_put(&V_pf_state_key_pl, skw);
873 if (sks != NULL)
874 pool_put(&V_pf_state_key_pl, sks);
875#else
876 if (skw != NULL)
877 pool_put(&pf_state_key_pl, skw);
878 if (sks != NULL)
879 pool_put(&pf_state_key_pl, sks);
880#endif
881
882cleanup_state: /* pf_state_insert frees the state keys */
883 if (st) {
884#ifdef __FreeBSD__
885 if (st->dst.scrub)
886 pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
887 if (st->src.scrub)
888 pool_put(&V_pf_state_scrub_pl, st->src.scrub);
889 pool_put(&V_pf_state_pl, st);
890#else
891 if (st->dst.scrub)
892 pool_put(&pf_state_scrub_pl, st->dst.scrub);
893 if (st->src.scrub)
894 pool_put(&pf_state_scrub_pl, st->src.scrub);
895 pool_put(&pf_state_pl, st);
896#endif
897 }
898 return (error);
899}
900
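/*
 * Input path: packets must arrive on the configured syncdev with an IP TTL
 * of PFSYNC_DFLTTL and a matching pfsync version; each subheader is then
 * dispatched through the pfsync_acts[] handlers.
 */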
901void
902#ifdef __FreeBSD__
903pfsync_input(struct mbuf *m, __unused int off)
904#else
905pfsync_input(struct mbuf *m, ...)
906#endif
907{
908#ifdef __FreeBSD__
909 struct pfsync_softc *sc = V_pfsyncif;
910#else
911 struct pfsync_softc *sc = pfsyncif;
912#endif
913 struct pfsync_pkt pkt;
914 struct ip *ip = mtod(m, struct ip *);
915 struct pfsync_header *ph;
916 struct pfsync_subheader subh;
917
918 int offset;
919 int rv;
920
921 V_pfsyncstats.pfsyncs_ipackets++;
922
923 /* verify that we have a sync interface configured */
924#ifdef __FreeBSD__
925 if (!sc || !sc->sc_sync_if || !V_pf_status.running)
926#else
927 if (!sc || !sc->sc_sync_if || !pf_status.running)
928#endif
929 goto done;
930
931 /* verify that the packet came in on the right interface */
932 if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
933 V_pfsyncstats.pfsyncs_badif++;
934 goto done;
935 }
936
937#ifdef __FreeBSD__
938 sc->sc_ifp->if_ipackets++;
939 sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
940#else
941 sc->sc_if.if_ipackets++;
942 sc->sc_if.if_ibytes += m->m_pkthdr.len;
943#endif
944 /* verify that the IP TTL is 255. */
945 if (ip->ip_ttl != PFSYNC_DFLTTL) {
946 V_pfsyncstats.pfsyncs_badttl++;
947 goto done;
948 }
949
950 offset = ip->ip_hl << 2;
951 if (m->m_pkthdr.len < offset + sizeof(*ph)) {
952 V_pfsyncstats.pfsyncs_hdrops++;
953 goto done;
954 }
955
956 if (offset + sizeof(*ph) > m->m_len) {
957 if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
958 V_pfsyncstats.pfsyncs_hdrops++;
959 return;
960 }
961 ip = mtod(m, struct ip *);
962 }
963 ph = (struct pfsync_header *)((char *)ip + offset);
964
965 /* verify the version */
966 if (ph->version != PFSYNC_VERSION) {
967 V_pfsyncstats.pfsyncs_badver++;
968 goto done;
969 }
970
971#if 0
972 if (pfsync_input_hmac(m, offset) != 0) {
973 /* XXX stats */
974 goto done;
975 }
976#endif
977
978 /* Cheaper to grab this now than having to mess with mbufs later */
979 pkt.ip = ip;
980 pkt.src = ip->ip_src;
981 pkt.flags = 0;
982
983#ifdef __FreeBSD__
984 if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
985#else
986 if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
987#endif
988 pkt.flags |= PFSYNC_SI_CKSUM;
989
990 offset += sizeof(*ph);
991 for (;;) {
992 m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
993 offset += sizeof(subh);
994
995 if (subh.action >= PFSYNC_ACT_MAX) {
996 V_pfsyncstats.pfsyncs_badact++;
997 goto done;
998 }
999
1000 rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
1001 ntohs(subh.count));
1002 if (rv == -1)
1003 return;
1004
1005 offset += rv;
1006 }
1007
1008done:
1009 m_freem(m);
1010}
1011
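/*
 * PFSYNC_ACT_CLR: unlink, without re-syncing, every state matching
 * the peer's creatorid.
 */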
1012int
1013pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1014{
1015 struct pfsync_clr *clr;
1016 struct mbuf *mp;
1017 int len = sizeof(*clr) * count;
1018 int i, offp;
1019
1020 struct pf_state *st, *nexts;
1021 struct pf_state_key *sk, *nextsk;
1022 struct pf_state_item *si;
1023 u_int32_t creatorid;
1024 int s;
1025
1026 mp = m_pulldown(m, offset, len, &offp);
1027 if (mp == NULL) {
1028 V_pfsyncstats.pfsyncs_badlen++;
1029 return (-1);
1030 }
1031 clr = (struct pfsync_clr *)(mp->m_data + offp);
1032
1033 s = splsoftnet();
1034#ifdef __FreeBSD__
1035 PF_LOCK();
1036#endif
1037 for (i = 0; i < count; i++) {
1038 creatorid = clr[i].creatorid;
1039
1040 if (clr[i].ifname[0] == '\0') {
1041#ifdef __FreeBSD__
1042 for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
1043 st; st = nexts) {
1044 nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
1045#else
1046 for (st = RB_MIN(pf_state_tree_id, &tree_id);
1047 st; st = nexts) {
1048 nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
1049#endif
1050 if (st->creatorid == creatorid) {
1051 SET(st->state_flags, PFSTATE_NOSYNC);
1052 pf_unlink_state(st);
1053 }
1054 }
1055 } else {
1056 if (pfi_kif_get(clr[i].ifname) == NULL)
1057 continue;
1058
1059 /* XXX correct? */
1060#ifdef __FreeBSD__
1061 for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
1062#else
1063 for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
1064#endif
1065 sk; sk = nextsk) {
1066 nextsk = RB_NEXT(pf_state_tree,
1067#ifdef __FreeBSD__
1068 &V_pf_statetbl, sk);
1069#else
1070 &pf_statetbl, sk);
1071#endif
1072 TAILQ_FOREACH(si, &sk->states, entry) {
1073 if (si->s->creatorid == creatorid) {
1074 SET(si->s->state_flags,
1075 PFSTATE_NOSYNC);
1076 pf_unlink_state(si->s);
1077 }
1078 }
1079 }
1080 }
1081 }
1082#ifdef __FreeBSD__
1083 PF_UNLOCK();
1084#endif
1085 splx(s);
1086
1087 return (len);
1088}
1089
1090int
1091pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1092{
1093 struct mbuf *mp;
1094 struct pfsync_state *sa, *sp;
1095 int len = sizeof(*sp) * count;
1096 int i, offp;
1097
1098 int s;
1099
1100 mp = m_pulldown(m, offset, len, &offp);
1101 if (mp == NULL) {
1102 V_pfsyncstats.pfsyncs_badlen++;
1103 return (-1);
1104 }
1105 sa = (struct pfsync_state *)(mp->m_data + offp);
1106
1107 s = splsoftnet();
1108#ifdef __FreeBSD__
1109 PF_LOCK();
1110#endif
1111 for (i = 0; i < count; i++) {
1112 sp = &sa[i];
1113
1114 /* check for invalid values */
1115 if (sp->timeout >= PFTM_MAX ||
1116 sp->src.state > PF_TCPS_PROXY_DST ||
1117 sp->dst.state > PF_TCPS_PROXY_DST ||
1118 sp->direction > PF_OUT ||
1119 (sp->af != AF_INET && sp->af != AF_INET6)) {
1120#ifdef __FreeBSD__
1121 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1122#else
1123 if (pf_status.debug >= PF_DEBUG_MISC) {
1124#endif
1125				printf("pfsync_input: PFSYNC_ACT_INS: "
1126 "invalid value\n");
1127 }
1128 V_pfsyncstats.pfsyncs_badval++;
1129 continue;
1130 }
1131
1132 if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
1133 /* drop out, but process the rest of the actions */
1134 break;
1135 }
1136 }
1137#ifdef __FreeBSD__
1138 PF_UNLOCK();
1139#endif
1140 splx(s);
1141
1142 return (len);
1143}
1144
1145int
1146pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1147{
1148 struct pfsync_ins_ack *ia, *iaa;
1149 struct pf_state_cmp id_key;
1150 struct pf_state *st;
1151
1152 struct mbuf *mp;
1153 int len = count * sizeof(*ia);
1154 int offp, i;
1155 int s;
1156
1157 mp = m_pulldown(m, offset, len, &offp);
1158 if (mp == NULL) {
1159 V_pfsyncstats.pfsyncs_badlen++;
1160 return (-1);
1161 }
1162 iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);
1163
1164 s = splsoftnet();
1165#ifdef __FreeBSD__
1166 PF_LOCK();
1167#endif
1168 for (i = 0; i < count; i++) {
1169 ia = &iaa[i];
1170
1171 bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
1172 id_key.creatorid = ia->creatorid;
1173
1174 st = pf_find_state_byid(&id_key);
1175 if (st == NULL)
1176 continue;
1177
1178 if (ISSET(st->state_flags, PFSTATE_ACK))
1179 pfsync_deferred(st, 0);
1180 }
1181#ifdef __FreeBSD__
1182 PF_UNLOCK();
1183#endif
1184 splx(s);
1185 /*
1186 * XXX this is not yet implemented, but we know the size of the
1187 * message so we can skip it.
1188 */
1189
1190 return (count * sizeof(struct pfsync_ins_ack));
1191}
1192
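/*
 * Sanity check a TCP state update; return a non-zero reason code if
 * the peer's update would move the state or sequence window backwards.
 */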
1193int
1194pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
1195 struct pfsync_state_peer *dst)
1196{
1197 int sfail = 0;
1198
1199 /*
1200 * The state should never go backwards except
1201 * for syn-proxy states. Neither should the
1202 * sequence window slide backwards.
1203 */
1204 if (st->src.state > src->state &&
1205 (st->src.state < PF_TCPS_PROXY_SRC ||
1206 src->state >= PF_TCPS_PROXY_SRC))
1207 sfail = 1;
1208 else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
1209 sfail = 3;
1210 else if (st->dst.state > dst->state) {
1211 /* There might still be useful
1212 * information about the src state here,
1213 * so import that part of the update,
1214 * then "fail" so we send the updated
1215 * state back to the peer who is missing
1216			 * what we know. */
1217 pf_state_peer_ntoh(src, &st->src);
1218 /* XXX do anything with timeouts? */
1219 sfail = 7;
1220 } else if (st->dst.state >= TCPS_SYN_SENT &&
1221 SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
1222 sfail = 4;
1223
1224 return (sfail);
1225}
1226
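/*
 * PFSYNC_ACT_UPD: merge full state updates; unknown states are
 * imported, stale updates are answered with our own newer copy.
 */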
1227int
1228pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1229{
1230 struct pfsync_state *sa, *sp;
1231 struct pf_state_cmp id_key;
1232 struct pf_state_key *sk;
1233 struct pf_state *st;
1234 int sfail;
1235
1236 struct mbuf *mp;
1237 int len = count * sizeof(*sp);
1238 int offp, i;
1239 int s;
1240
1241 mp = m_pulldown(m, offset, len, &offp);
1242 if (mp == NULL) {
1243 V_pfsyncstats.pfsyncs_badlen++;
1244 return (-1);
1245 }
1246 sa = (struct pfsync_state *)(mp->m_data + offp);
1247
1248 s = splsoftnet();
1249#ifdef __FreeBSD__
1250 PF_LOCK();
1251#endif
1252 for (i = 0; i < count; i++) {
1253 sp = &sa[i];
1254
1255 /* check for invalid values */
1256 if (sp->timeout >= PFTM_MAX ||
1257 sp->src.state > PF_TCPS_PROXY_DST ||
1258 sp->dst.state > PF_TCPS_PROXY_DST) {
1259#ifdef __FreeBSD__
1260 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1261#else
1262 if (pf_status.debug >= PF_DEBUG_MISC) {
1263#endif
1264 printf("pfsync_input: PFSYNC_ACT_UPD: "
1265 "invalid value\n");
1266 }
1267 V_pfsyncstats.pfsyncs_badval++;
1268 continue;
1269 }
1270
1271 bcopy(sp->id, &id_key.id, sizeof(id_key.id));
1272 id_key.creatorid = sp->creatorid;
1273
1274 st = pf_find_state_byid(&id_key);
1275 if (st == NULL) {
1276 /* insert the update */
1277 if (pfsync_state_import(sp, 0))
1278 V_pfsyncstats.pfsyncs_badstate++;
1279 continue;
1280 }
1281
1282 if (ISSET(st->state_flags, PFSTATE_ACK))
1283 pfsync_deferred(st, 1);
1284
1285 sk = st->key[PF_SK_WIRE]; /* XXX right one? */
1286 sfail = 0;
1287 if (sk->proto == IPPROTO_TCP)
1288 sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
1289 else {
1290 /*
1291			 * Non-TCP protocol state machines always go
1292 * forwards
1293 */
1294 if (st->src.state > sp->src.state)
1295 sfail = 5;
1296 else if (st->dst.state > sp->dst.state)
1297 sfail = 6;
1298 }
1299
1300 if (sfail) {
1301#ifdef __FreeBSD__
1302 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1303#else
1304 if (pf_status.debug >= PF_DEBUG_MISC) {
1305#endif
1306 printf("pfsync: %s stale update (%d)"
1307 " id: %016llx creatorid: %08x\n",
1308 (sfail < 7 ? "ignoring" : "partial"),
1309 sfail, betoh64(st->id),
1310 ntohl(st->creatorid));
1311 }
1312 V_pfsyncstats.pfsyncs_stale++;
1313
1314 pfsync_update_state(st);
1315#ifdef __FreeBSD__
1316 pfsync_sendout();
1317#else
1318 schednetisr(NETISR_PFSYNC);
1319#endif
1320 continue;
1321 }
1322 pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
1323 pf_state_peer_ntoh(&sp->src, &st->src);
1324 pf_state_peer_ntoh(&sp->dst, &st->dst);
1325 st->expire = ntohl(sp->expire) + time_second;
1326 st->timeout = sp->timeout;
1327 st->pfsync_time = time_second;
1328 }
1329#ifdef __FreeBSD__
1330 PF_UNLOCK();
1331#endif
1332 splx(s);
1333
1334 return (len);
1335}
1336
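/*
 * PFSYNC_ACT_UPD_C: merge compressed state updates; if the state is
 * unknown, ask the peer for the full version.
 */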
1337int
1338pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1339{
1340 struct pfsync_upd_c *ua, *up;
1341 struct pf_state_key *sk;
1342 struct pf_state_cmp id_key;
1343 struct pf_state *st;
1344
1345 int len = count * sizeof(*up);
1346 int sfail;
1347
1348 struct mbuf *mp;
1349 int offp, i;
1350 int s;
1351
1352 mp = m_pulldown(m, offset, len, &offp);
1353 if (mp == NULL) {
1354 V_pfsyncstats.pfsyncs_badlen++;
1355 return (-1);
1356 }
1357 ua = (struct pfsync_upd_c *)(mp->m_data + offp);
1358
1359 s = splsoftnet();
1360#ifdef __FreeBSD__
1361 PF_LOCK();
1362#endif
1363 for (i = 0; i < count; i++) {
1364 up = &ua[i];
1365
1366 /* check for invalid values */
1367 if (up->timeout >= PFTM_MAX ||
1368 up->src.state > PF_TCPS_PROXY_DST ||
1369 up->dst.state > PF_TCPS_PROXY_DST) {
1370#ifdef __FreeBSD__
1371 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1372#else
1373 if (pf_status.debug >= PF_DEBUG_MISC) {
1374#endif
1375 printf("pfsync_input: "
1376 "PFSYNC_ACT_UPD_C: "
1377 "invalid value\n");
1378 }
1379 V_pfsyncstats.pfsyncs_badval++;
1380 continue;
1381 }
1382
1383 bcopy(&up->id, &id_key.id, sizeof(id_key.id));
1384 id_key.creatorid = up->creatorid;
1385
1386 st = pf_find_state_byid(&id_key);
1387 if (st == NULL) {
1388 /* We don't have this state. Ask for it. */
1389 pfsync_request_update(id_key.creatorid, id_key.id);
1390 continue;
1391 }
1392
1393 if (ISSET(st->state_flags, PFSTATE_ACK))
1394 pfsync_deferred(st, 1);
1395
1396 sk = st->key[PF_SK_WIRE]; /* XXX right one? */
1397 sfail = 0;
1398 if (sk->proto == IPPROTO_TCP)
1399 sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
1400 else {
1401 /*
1402			 * Non-TCP protocol state machines always go forwards
1403 */
1404 if (st->src.state > up->src.state)
1405 sfail = 5;
1406 else if (st->dst.state > up->dst.state)
1407 sfail = 6;
1408 }
1409
1410 if (sfail) {
1411#ifdef __FreeBSD__
1412 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1413#else
1414 if (pf_status.debug >= PF_DEBUG_MISC) {
1415#endif
1416 printf("pfsync: ignoring stale update "
1417 "(%d) id: %016llx "
1418 "creatorid: %08x\n", sfail,
1419 betoh64(st->id),
1420 ntohl(st->creatorid));
1421 }
1422 V_pfsyncstats.pfsyncs_stale++;
1423
1424 pfsync_update_state(st);
1425#ifdef __FreeBSD__
1426 pfsync_sendout();
1427#else
1428 schednetisr(NETISR_PFSYNC);
1429#endif
1430 continue;
1431 }
1432 pfsync_alloc_scrub_memory(&up->dst, &st->dst);
1433 pf_state_peer_ntoh(&up->src, &st->src);
1434 pf_state_peer_ntoh(&up->dst, &st->dst);
1435 st->expire = ntohl(up->expire) + time_second;
1436 st->timeout = up->timeout;
1437 st->pfsync_time = time_second;
1438 }
1439#ifdef __FreeBSD__
1440 PF_UNLOCK();
1441#endif
1442 splx(s);
1443
1444 return (len);
1445}
1446
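/*
 * PFSYNC_ACT_UPD_REQ: an all-zero id/creatorid asks for a full bulk
 * update, otherwise queue an update for the requested state.
 */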
1447int
1448pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1449{
1450 struct pfsync_upd_req *ur, *ura;
1451 struct mbuf *mp;
1452 int len = count * sizeof(*ur);
1453 int i, offp;
1454
1455 struct pf_state_cmp id_key;
1456 struct pf_state *st;
1457
1458 mp = m_pulldown(m, offset, len, &offp);
1459 if (mp == NULL) {
1460 V_pfsyncstats.pfsyncs_badlen++;
1461 return (-1);
1462 }
1463 ura = (struct pfsync_upd_req *)(mp->m_data + offp);
1464
1465 for (i = 0; i < count; i++) {
1466 ur = &ura[i];
1467
1468 bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
1469 id_key.creatorid = ur->creatorid;
1470
1471 if (id_key.id == 0 && id_key.creatorid == 0)
1472 pfsync_bulk_start();
1473 else {
1474 st = pf_find_state_byid(&id_key);
1475 if (st == NULL) {
1476 V_pfsyncstats.pfsyncs_badstate++;
1477 continue;
1478 }
1479 if (ISSET(st->state_flags, PFSTATE_NOSYNC))
1480 continue;
1481
1482 PF_LOCK();
1483 pfsync_update_state_req(st);
1484 PF_UNLOCK();
1485 }
1486 }
1487
1488 return (len);
1489}
1490
1491int
1492pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1493{
1494 struct mbuf *mp;
1495 struct pfsync_state *sa, *sp;
1496 struct pf_state_cmp id_key;
1497 struct pf_state *st;
1498 int len = count * sizeof(*sp);
1499 int offp, i;
1500 int s;
1501
1502 mp = m_pulldown(m, offset, len, &offp);
1503 if (mp == NULL) {
1504 V_pfsyncstats.pfsyncs_badlen++;
1505 return (-1);
1506 }
1507 sa = (struct pfsync_state *)(mp->m_data + offp);
1508
1509 s = splsoftnet();
1510#ifdef __FreeBSD__
1511 PF_LOCK();
1512#endif
1513 for (i = 0; i < count; i++) {
1514 sp = &sa[i];
1515
1516 bcopy(sp->id, &id_key.id, sizeof(id_key.id));
1517 id_key.creatorid = sp->creatorid;
1518
1519 st = pf_find_state_byid(&id_key);
1520 if (st == NULL) {
1521 V_pfsyncstats.pfsyncs_badstate++;
1522 continue;
1523 }
1524 SET(st->state_flags, PFSTATE_NOSYNC);
1525 pf_unlink_state(st);
1526 }
1527#ifdef __FreeBSD__
1528 PF_UNLOCK();
1529#endif
1530 splx(s);
1531
1532 return (len);
1533}
1534
1535int
1536pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1537{
1538 struct mbuf *mp;
1539 struct pfsync_del_c *sa, *sp;
1540 struct pf_state_cmp id_key;
1541 struct pf_state *st;
1542 int len = count * sizeof(*sp);
1543 int offp, i;
1544 int s;
1545
1546 mp = m_pulldown(m, offset, len, &offp);
1547 if (mp == NULL) {
1548 V_pfsyncstats.pfsyncs_badlen++;
1549 return (-1);
1550 }
1551 sa = (struct pfsync_del_c *)(mp->m_data + offp);
1552
1553 s = splsoftnet();
1554#ifdef __FreeBSD__
1555 PF_LOCK();
1556#endif
1557 for (i = 0; i < count; i++) {
1558 sp = &sa[i];
1559
1560 bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
1561 id_key.creatorid = sp->creatorid;
1562
1563 st = pf_find_state_byid(&id_key);
1564 if (st == NULL) {
1565 V_pfsyncstats.pfsyncs_badstate++;
1566 continue;
1567 }
1568
1569 SET(st->state_flags, PFSTATE_NOSYNC);
1570 pf_unlink_state(st);
1571 }
1572#ifdef __FreeBSD__
1573 PF_UNLOCK();
1574#endif
1575 splx(s);
1576
1577 return (len);
1578}
1579
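/*
 * PFSYNC_ACT_BUS: bulk update status from the peer; only interesting
 * while we are waiting for a bulk update ourselves.
 */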
1580int
1581pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1582{
1583#ifdef __FreeBSD__
1584 struct pfsync_softc *sc = V_pfsyncif;
1585#else
1586 struct pfsync_softc *sc = pfsyncif;
1587#endif
1588 struct pfsync_bus *bus;
1589 struct mbuf *mp;
1590 int len = count * sizeof(*bus);
1591 int offp;
1592
1593 /* If we're not waiting for a bulk update, who cares. */
1594 if (sc->sc_ureq_sent == 0)
1595 return (len);
1596
1597 mp = m_pulldown(m, offset, len, &offp);
1598 if (mp == NULL) {
1599 V_pfsyncstats.pfsyncs_badlen++;
1600 return (-1);
1601 }
1602 bus = (struct pfsync_bus *)(mp->m_data + offp);
1603
1604 switch (bus->status) {
1605 case PFSYNC_BUS_START:
1606#ifdef __FreeBSD__
1607 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
1608 V_pfsyncif);
1609#else
1610 timeout_add_sec(&sc->sc_bulkfail_tmo, 5); /* XXX magic */
1611#endif
1612#ifdef XXX
1613 pf_pool_limits[PF_LIMIT_STATES].limit /
1614 (PFSYNC_BULKPACKETS * sc->sc_maxcount));
1615#endif
1616#ifdef __FreeBSD__
1617 if (V_pf_status.debug >= PF_DEBUG_MISC)
1618#else
1619 if (pf_status.debug >= PF_DEBUG_MISC)
1620#endif
1621 printf("pfsync: received bulk update start\n");
1622 break;
1623
1624 case PFSYNC_BUS_END:
1625 if (time_uptime - ntohl(bus->endtime) >=
1626 sc->sc_ureq_sent) {
1627 /* that's it, we're happy */
1628 sc->sc_ureq_sent = 0;
1629 sc->sc_bulk_tries = 0;
1630 timeout_del(&sc->sc_bulkfail_tmo);
1631#ifdef __FreeBSD__
1632 if (!sc->pfsync_sync_ok && carp_demote_adj_p)
1633 (*carp_demote_adj_p)(-V_pfsync_carp_adj,
1634 "pfsync bulk done");
1635 sc->pfsync_sync_ok = 1;
1636#else
1637#if NCARP > 0
1638 if (!pfsync_sync_ok)
1639 carp_group_demote_adj(&sc->sc_if, -1);
1640#endif
1641 pfsync_sync_ok = 1;
1642#endif
1643#ifdef __FreeBSD__
1644 if (V_pf_status.debug >= PF_DEBUG_MISC)
1645#else
1646 if (pf_status.debug >= PF_DEBUG_MISC)
1647#endif
1648 printf("pfsync: received valid "
1649 "bulk update end\n");
1650 } else {
1651#ifdef __FreeBSD__
1652 if (V_pf_status.debug >= PF_DEBUG_MISC)
1653#else
1654 if (pf_status.debug >= PF_DEBUG_MISC)
1655#endif
1656 printf("pfsync: received invalid "
1657 "bulk update end: bad timestamp\n");
1658 }
1659 break;
1660 }
1661
1662 return (len);
1663}
1664
1665int
1666pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1667{
1668 int len = count * sizeof(struct pfsync_tdb);
1669
1670#if defined(IPSEC)
1671 struct pfsync_tdb *tp;
1672 struct mbuf *mp;
1673 int offp;
1674 int i;
1675 int s;
1676
1677 mp = m_pulldown(m, offset, len, &offp);
1678 if (mp == NULL) {
1679 V_pfsyncstats.pfsyncs_badlen++;
1680 return (-1);
1681 }
1682 tp = (struct pfsync_tdb *)(mp->m_data + offp);
1683
1684 s = splsoftnet();
1685#ifdef __FreeBSD__
1686 PF_LOCK();
1687#endif
1688 for (i = 0; i < count; i++)
1689 pfsync_update_net_tdb(&tp[i]);
1690#ifdef __FreeBSD__
1691 PF_UNLOCK();
1692#endif
1693 splx(s);
1694#endif
1695
1696 return (len);
1697}
1698
1699#if defined(IPSEC)
1700/* Update an in-kernel tdb. Silently fail if no tdb is found. */
1701void
1702pfsync_update_net_tdb(struct pfsync_tdb *pt)
1703{
1704 struct tdb *tdb;
1705 int s;
1706
1707 /* check for invalid values */
1708 if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1709 (pt->dst.sa.sa_family != AF_INET &&
1710 pt->dst.sa.sa_family != AF_INET6))
1711 goto bad;
1712
1713 s = spltdb();
1714 tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
1715 if (tdb) {
1716 pt->rpl = ntohl(pt->rpl);
1717 pt->cur_bytes = betoh64(pt->cur_bytes);
1718
1719 /* Neither replay nor byte counter should ever decrease. */
1720 if (pt->rpl < tdb->tdb_rpl ||
1721 pt->cur_bytes < tdb->tdb_cur_bytes) {
1722 splx(s);
1723 goto bad;
1724 }
1725
1726 tdb->tdb_rpl = pt->rpl;
1727 tdb->tdb_cur_bytes = pt->cur_bytes;
1728 }
1729 splx(s);
1730 return;
1731
1732bad:
1733#ifdef __FreeBSD__
1734 if (V_pf_status.debug >= PF_DEBUG_MISC)
1735#else
1736 if (pf_status.debug >= PF_DEBUG_MISC)
1737#endif
1738 printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1739 "invalid value\n");
1740 V_pfsyncstats.pfsyncs_badstate++;
1741 return;
1742}
1743#endif
1744
1745
1746int
1747pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1748{
1749 /* check if we are at the right place in the packet */
1750 if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
1751 V_pfsyncstats.pfsyncs_badact++;
1752
1753 /* we're done. free and let the caller return */
1754 m_freem(m);
1755 return (-1);
1756}
1757
1758int
1759pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1760{
1761 V_pfsyncstats.pfsyncs_badact++;
1762
1763 m_freem(m);
1764 return (-1);
1765}
1766
1767int
1768pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
1769#ifdef __FreeBSD__
1770 struct route *rt)
1771#else
1772 struct rtentry *rt)
1773#endif
1774{
1775 m_freem(m);
1776 return (0);
1777}
1778
1779/* ARGSUSED */
1780int
1781pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1782{
1783#ifndef __FreeBSD__
1784 struct proc *p = curproc;
1785#endif
1786 struct pfsync_softc *sc = ifp->if_softc;
1787 struct ifreq *ifr = (struct ifreq *)data;
1788 struct ip_moptions *imo = &sc->sc_imo;
1789 struct pfsyncreq pfsyncr;
1790 struct ifnet *sifp;
1791 struct ip *ip;
1792 int s, error;
1793
1794 switch (cmd) {
1795#if 0
1796 case SIOCSIFADDR:
1797 case SIOCAIFADDR:
1798 case SIOCSIFDSTADDR:
1799#endif
1800 case SIOCSIFFLAGS:
1801#ifdef __FreeBSD__
1802 if (ifp->if_flags & IFF_UP)
1803 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1804 else
1805 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1806#else
1807 if (ifp->if_flags & IFF_UP)
1808 ifp->if_flags |= IFF_RUNNING;
1809 else
1810 ifp->if_flags &= ~IFF_RUNNING;
1811#endif
1812 break;
1813 case SIOCSIFMTU:
1814 if (ifr->ifr_mtu <= PFSYNC_MINPKT)
1815 return (EINVAL);
1816 if (ifr->ifr_mtu > MCLBYTES) /* XXX could be bigger */
1817 ifr->ifr_mtu = MCLBYTES;
1818 if (ifr->ifr_mtu < ifp->if_mtu) {
1819 s = splnet();
1820#ifdef __FreeBSD__
1821 PF_LOCK();
1822#endif
1823 pfsync_sendout();
1824#ifdef __FreeBSD__
1825 PF_UNLOCK();
1826#endif
1827 splx(s);
1828 }
1829 ifp->if_mtu = ifr->ifr_mtu;
1830 break;
1831 case SIOCGETPFSYNC:
1832 bzero(&pfsyncr, sizeof(pfsyncr));
1833 if (sc->sc_sync_if) {
1834 strlcpy(pfsyncr.pfsyncr_syncdev,
1835 sc->sc_sync_if->if_xname, IFNAMSIZ);
1836 }
1837 pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
1838 pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1839 return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));
1840
1841 case SIOCSETPFSYNC:
1842#ifdef __FreeBSD__
1843 if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1844#else
1845 if ((error = suser(p, p->p_acflag)) != 0)
1846#endif
1847 return (error);
1848 if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
1849 return (error);
1850
1851#ifdef __FreeBSD__
1852 PF_LOCK();
1853#endif
1854 if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
1855#ifdef __FreeBSD__
1856 sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
1857#else
1858 sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
1859#endif
1860 else
1861 sc->sc_sync_peer.s_addr =
1862 pfsyncr.pfsyncr_syncpeer.s_addr;
1863
1864 if (pfsyncr.pfsyncr_maxupdates > 255)
1865#ifdef __FreeBSD__
1866 {
1867 PF_UNLOCK();
1868#endif
1869 return (EINVAL);
1870#ifdef __FreeBSD__
1871 }
1872#endif
1873 sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
1874
1875 if (pfsyncr.pfsyncr_syncdev[0] == 0) {
1876 sc->sc_sync_if = NULL;
1877#ifdef __FreeBSD__
1878 PF_UNLOCK();
1879#endif
1880 if (imo->imo_num_memberships > 0) {
1881 in_delmulti(imo->imo_membership[
1882 --imo->imo_num_memberships]);
1883 imo->imo_multicast_ifp = NULL;
1884 }
1885 break;
1886 }
1887
1888#ifdef __FreeBSD__
1889 PF_UNLOCK();
1890#endif
1891 if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
1892 return (EINVAL);
1893
1894#ifdef __FreeBSD__
1895 PF_LOCK();
1896#endif
1897 s = splnet();
1898#ifdef __FreeBSD__
1899 if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
1900#else
1901 if (sifp->if_mtu < sc->sc_if.if_mtu ||
1902#endif
1903 (sc->sc_sync_if != NULL &&
1904 sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
1905 sifp->if_mtu < MCLBYTES - sizeof(struct ip))
1906 pfsync_sendout();
1907 sc->sc_sync_if = sifp;
1908
1909 if (imo->imo_num_memberships > 0) {
1910#ifdef __FreeBSD__
1911 PF_UNLOCK();
1912#endif
1913 in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
1914#ifdef __FreeBSD__
1915 PF_LOCK();
1916#endif
1917 imo->imo_multicast_ifp = NULL;
1918 }
1919
1920 if (sc->sc_sync_if &&
1921#ifdef __FreeBSD__
1922 sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
1923#else
1924 sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1925#endif
1926 struct in_addr addr;
1927
1928 if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
1929 sc->sc_sync_if = NULL;
1930#ifdef __FreeBSD__
1931 PF_UNLOCK();
1932#endif
1933 splx(s);
1934 return (EADDRNOTAVAIL);
1935 }
1936
1937#ifdef __FreeBSD__
1938 addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
1939#else
1940 addr.s_addr = INADDR_PFSYNC_GROUP;
1941#endif
1942
1943#ifdef __FreeBSD__
1944 PF_UNLOCK();
1945#endif
1946 if ((imo->imo_membership[0] =
1947 in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
1948 sc->sc_sync_if = NULL;
1949 splx(s);
1950 return (ENOBUFS);
1951 }
1952#ifdef __FreeBSD__
1953 PF_LOCK();
1954#endif
1955 imo->imo_num_memberships++;
1956 imo->imo_multicast_ifp = sc->sc_sync_if;
1957 imo->imo_multicast_ttl = PFSYNC_DFLTTL;
1958 imo->imo_multicast_loop = 0;
1959 }
1960
1961 ip = &sc->sc_template;
1962 bzero(ip, sizeof(*ip));
1963 ip->ip_v = IPVERSION;
1964 ip->ip_hl = sizeof(sc->sc_template) >> 2;
1965 ip->ip_tos = IPTOS_LOWDELAY;
1966 /* len and id are set later */
1967#ifdef __FreeBSD__
1968 ip->ip_off = IP_DF;
1969#else
1970 ip->ip_off = htons(IP_DF);
1971#endif
1972 ip->ip_ttl = PFSYNC_DFLTTL;
1973 ip->ip_p = IPPROTO_PFSYNC;
1974 ip->ip_src.s_addr = INADDR_ANY;
1975 ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
1976
1977 if (sc->sc_sync_if) {
1978 /* Request a full state table update. */
1979 sc->sc_ureq_sent = time_uptime;
1980#ifdef __FreeBSD__
1981 if (sc->pfsync_sync_ok && carp_demote_adj_p)
1982 (*carp_demote_adj_p)(V_pfsync_carp_adj,
1983 "pfsync bulk start");
1984 sc->pfsync_sync_ok = 0;
1985#else
1986#if NCARP > 0
1987 if (pfsync_sync_ok)
1988 carp_group_demote_adj(&sc->sc_if, 1);
1989#endif
1990 pfsync_sync_ok = 0;
1991#endif
1992#ifdef __FreeBSD__
1993 if (V_pf_status.debug >= PF_DEBUG_MISC)
1994#else
1995 if (pf_status.debug >= PF_DEBUG_MISC)
1996#endif
1997 printf("pfsync: requesting bulk update\n");
1998#ifdef __FreeBSD__
1999 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
2000 pfsync_bulk_fail, V_pfsyncif);
2001#else
2002 timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
2003#endif
2004 pfsync_request_update(0, 0);
2005 }
2006#ifdef __FreeBSD__
2007 PF_UNLOCK();
2008#endif
2009 splx(s);
2010
2011 break;
2012
2013 default:
2014 return (ENOTTY);
2015 }
2016
2017 return (0);
2018}
2019
2020int
2021pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
2022{
2023 struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);
2024
2025 pfsync_state_export(sp, st);
2026
2027 return (sizeof(*sp));
2028}
2029
2030int
2031pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
2032{
2033 struct pfsync_ins_ack *iack =
2034 (struct pfsync_ins_ack *)(m->m_data + offset);
2035
2036 iack->id = st->id;
2037 iack->creatorid = st->creatorid;
2038
2039 return (sizeof(*iack));
2040}
2041
2042int
2043pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
2044{
2045 struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);
2046
2047 up->id = st->id;
2048 pf_state_peer_hton(&st->src, &up->src);
2049 pf_state_peer_hton(&st->dst, &up->dst);
2050 up->creatorid = st->creatorid;
2051
2052 up->expire = pf_state_expires(st);
2053 if (up->expire <= time_second)
2054 up->expire = htonl(0);
2055 else
2056 up->expire = htonl(up->expire - time_second);
2057 up->timeout = st->timeout;
2058
2059 bzero(up->_pad, sizeof(up->_pad)); /* XXX */
2060
2061 return (sizeof(*up));
2062}
2063
2064int
2065pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
2066{
2067 struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);
2068
2069 dp->id = st->id;
2070 dp->creatorid = st->creatorid;
2071
2072 SET(st->state_flags, PFSTATE_NOSYNC);
2073
2074 return (sizeof(*dp));
2075}
2076
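/*
 * Discard everything queued for transmission and reset the pending
 * packet length.
 */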
2077void
2078pfsync_drop(struct pfsync_softc *sc)
2079{
2080 struct pf_state *st;
2081 struct pfsync_upd_req_item *ur;
2082#ifdef notyet
2083 struct tdb *t;
2084#endif
2085 int q;
2086
2087 for (q = 0; q < PFSYNC_S_COUNT; q++) {
2088 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2089 continue;
2090
2091 TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
2092#ifdef PFSYNC_DEBUG
2093#ifdef __FreeBSD__
2094 KASSERT(st->sync_state == q,
2095 ("%s: st->sync_state == q",
2096 __FUNCTION__));
2097#else
2098 KASSERT(st->sync_state == q);
2099#endif
2100#endif
2101 st->sync_state = PFSYNC_S_NONE;
2102 }
2103 TAILQ_INIT(&sc->sc_qs[q]);
2104 }
2105
2106 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
2107 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
2108 pool_put(&sc->sc_pool, ur);
2109 }
2110
2111 sc->sc_plus = NULL;
2112
2113#ifdef notyet
2114 if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
2115 TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
2116 CLR(t->tdb_flags, TDBF_PFSYNC);
2117
2118 TAILQ_INIT(&sc->sc_tdb_q);
2119 }
2120#endif
2121
2122 sc->sc_len = PFSYNC_MINPKT;
2123}
2124
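/*
 * Build a single pfsync packet from the queued states, update requests
 * and the optional "plus" region, terminate it with an EOF subheader
 * and hand it to the output path.
 */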
2125void
2126pfsync_sendout(void)
2127{
2128#ifdef __FreeBSD__
2129 struct pfsync_softc *sc = V_pfsyncif;
2130#else
2131 struct pfsync_softc *sc = pfsyncif;
2132#endif
2133#if NBPFILTER > 0
2134#ifdef __FreeBSD__
2135 struct ifnet *ifp = sc->sc_ifp;
2136#else
2137 struct ifnet *ifp = &sc->sc_if;
2138#endif
2139#endif
2140 struct mbuf *m;
2141 struct ip *ip;
2142 struct pfsync_header *ph;
2143 struct pfsync_subheader *subh;
2144 struct pf_state *st;
2145 struct pfsync_upd_req_item *ur;
2146#ifdef notyet
2147 struct tdb *t;
2148#endif
2149#ifdef __FreeBSD__
2150 size_t pktlen;
2151 int dummy_error;
2152#endif
2153 int offset;
2154 int q, count = 0;
2155
2156#ifdef __FreeBSD__
2157 PF_LOCK_ASSERT();
2158#else
2159 splassert(IPL_NET);
2160#endif
2161
2162 if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
2163 return;
2164
2165#if NBPFILTER > 0
2166 if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
2167#else
2168 if (sc->sc_sync_if == NULL) {
2169#endif
2170 pfsync_drop(sc);
2171 return;
2172 }
2173
2174 MGETHDR(m, M_DONTWAIT, MT_DATA);
2175 if (m == NULL) {
2176#ifdef __FreeBSD__
2177 sc->sc_ifp->if_oerrors++;
2178#else
2179 sc->sc_if.if_oerrors++;
2180#endif
2181 V_pfsyncstats.pfsyncs_onomem++;
2182 pfsync_drop(sc);
2183 return;
2184 }
2185
2186#ifdef __FreeBSD__
2187 pktlen = max_linkhdr + sc->sc_len;
2188 if (pktlen > MHLEN) {
2189 /* Find the right pool to allocate from. */
2190 /* XXX: This is ugly. */
2191 m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE :
2192 pktlen <= MCLBYTES ? MCLBYTES :
2193#if MJUMPAGESIZE != MCLBYTES
2194 pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
2195#endif
2196 pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
2197#else
2198 if (max_linkhdr + sc->sc_len > MHLEN) {
2199 MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
2200#endif
2201 if (!ISSET(m->m_flags, M_EXT)) {
2202 m_free(m);
2203#ifdef __FreeBSD__
2204 sc->sc_ifp->if_oerrors++;
2205#else
2206 sc->sc_if.if_oerrors++;
2207#endif
2208 V_pfsyncstats.pfsyncs_onomem++;
2209 pfsync_drop(sc);
2210 return;
2211 }
2212 }
2213 m->m_data += max_linkhdr;
2214 m->m_len = m->m_pkthdr.len = sc->sc_len;
2215
2216 /* build the ip header */
2217 ip = (struct ip *)m->m_data;
2218 bcopy(&sc->sc_template, ip, sizeof(*ip));
2219 offset = sizeof(*ip);
2220
2221#ifdef __FreeBSD__
2222 ip->ip_len = m->m_pkthdr.len;
2223#else
2224 ip->ip_len = htons(m->m_pkthdr.len);
2225#endif
2226 ip->ip_id = htons(ip_randomid());
2227
2228 /* build the pfsync header */
2229 ph = (struct pfsync_header *)(m->m_data + offset);
2230 bzero(ph, sizeof(*ph));
2231 offset += sizeof(*ph);
2232
2233 ph->version = PFSYNC_VERSION;
2234 ph->len = htons(sc->sc_len - sizeof(*ip));
2235#ifdef __FreeBSD__
2236 bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
2237#else
2238 bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
2239#endif
2240
2241 /* walk the queues */
2242 for (q = 0; q < PFSYNC_S_COUNT; q++) {
2243 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2244 continue;
2245
2246 subh = (struct pfsync_subheader *)(m->m_data + offset);
2247 offset += sizeof(*subh);
2248
2249 count = 0;
2250 TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
2251#ifdef PFSYNC_DEBUG
2252#ifdef __FreeBSD__
2253 KASSERT(st->sync_state == q,
2254 ("%s: st->sync_state == q",
2255 __FUNCTION__));
2256#else
2257 KASSERT(st->sync_state == q);
2258#endif
2259#endif
2260
2261 offset += pfsync_qs[q].write(st, m, offset);
2262 st->sync_state = PFSYNC_S_NONE;
2263 count++;
2264 }
2265 TAILQ_INIT(&sc->sc_qs[q]);
2266
2267 bzero(subh, sizeof(*subh));
2268 subh->action = pfsync_qs[q].action;
2269 subh->count = htons(count);
2270 }
2271
2272 if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
2273 subh = (struct pfsync_subheader *)(m->m_data + offset);
2274 offset += sizeof(*subh);
2275
2276 count = 0;
2277 while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
2278 TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
2279
2280 bcopy(&ur->ur_msg, m->m_data + offset,
2281 sizeof(ur->ur_msg));
2282 offset += sizeof(ur->ur_msg);
2283
2284 pool_put(&sc->sc_pool, ur);
2285
2286 count++;
2287 }
2288
2289 bzero(subh, sizeof(*subh));
2290 subh->action = PFSYNC_ACT_UPD_REQ;
2291 subh->count = htons(count);
2292 }
2293
2294 /* has someone built a custom region for us to add? */
2295 if (sc->sc_plus != NULL) {
2296 bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
2297 offset += sc->sc_pluslen;
2298
2299 sc->sc_plus = NULL;
2300 }
2301
2302#ifdef notyet
2303 if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
2304 subh = (struct pfsync_subheader *)(m->m_data + offset);
2305 offset += sizeof(*subh);
2306
2307 count = 0;
2308 TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
2309 offset += pfsync_out_tdb(t, m, offset);
2310 CLR(t->tdb_flags, TDBF_PFSYNC);
2311
2312 count++;
2313 }
2314 TAILQ_INIT(&sc->sc_tdb_q);
2315
2316 bzero(subh, sizeof(*subh));
2317 subh->action = PFSYNC_ACT_TDB;
2318 subh->count = htons(count);
2319 }
2320#endif
2321
2322 subh = (struct pfsync_subheader *)(m->m_data + offset);
2323 offset += sizeof(*subh);
2324
2325 bzero(subh, sizeof(*subh));
2326 subh->action = PFSYNC_ACT_EOF;
2327 subh->count = htons(1);
2328
2329 /* XXX write checksum in EOF here */
2330
2331 /* we're done, let's put it on the wire */
2332#if NBPFILTER > 0
2333 if (ifp->if_bpf) {
2334 m->m_data += sizeof(*ip);
2335 m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
2336#ifdef __FreeBSD__
2337 BPF_MTAP(ifp, m);
2338#else
2339 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2340#endif
2341 m->m_data -= sizeof(*ip);
2342 m->m_len = m->m_pkthdr.len = sc->sc_len;
2343 }
2344
2345 if (sc->sc_sync_if == NULL) {
2346 sc->sc_len = PFSYNC_MINPKT;
2347 m_freem(m);
2348 return;
2349 }
2350#endif
2351
2352#ifdef __FreeBSD__
2353 sc->sc_ifp->if_opackets++;
2354 sc->sc_ifp->if_obytes += m->m_pkthdr.len;
2355 sc->sc_len = PFSYNC_MINPKT;
2356
2357 IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
2358 schednetisr(NETISR_PFSYNC);
2359#else
2360 sc->sc_if.if_opackets++;
2361 sc->sc_if.if_obytes += m->m_pkthdr.len;
2362
2363 if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
2364 pfsyncstats.pfsyncs_opackets++;
2365 else
2366 pfsyncstats.pfsyncs_oerrors++;
2367
2368 /* start again */
2369 sc->sc_len = PFSYNC_MINPKT;
2370#endif
2371}
2372
2373void
2374pfsync_insert_state(struct pf_state *st)
2375{
2376#ifdef __FreeBSD__
2377 struct pfsync_softc *sc = V_pfsyncif;
2378#else
2379 struct pfsync_softc *sc = pfsyncif;
2380#endif
2381
2382#ifdef __FreeBSD__
2383 PF_LOCK_ASSERT();
2384#else
2385 splassert(IPL_SOFTNET);
2386#endif
2387
2388 if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
2389 st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
2390 SET(st->state_flags, PFSTATE_NOSYNC);
2391 return;
2392 }
2393
2394 if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
2395 return;
2396
2397#ifdef PFSYNC_DEBUG
2398#ifdef __FreeBSD__
2399 KASSERT(st->sync_state == PFSYNC_S_NONE,
2400 ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
2401#else
2402 KASSERT(st->sync_state == PFSYNC_S_NONE);
2403#endif
2404#endif
2405
2406 if (sc->sc_len == PFSYNC_MINPKT)
2407#ifdef __FreeBSD__
2408 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2409 V_pfsyncif);
2410#else
2411 timeout_add_sec(&sc->sc_tmo, 1);
2412#endif
2413
2414 pfsync_q_ins(st, PFSYNC_S_INS);
2415
2416 if (ISSET(st->state_flags, PFSTATE_ACK))
2417#ifdef __FreeBSD__
2418 pfsync_sendout();
2419#else
2420 schednetisr(NETISR_PFSYNC);
2421#endif
2422 else
2423 st->sync_updates = 0;
2424}
2425
2426int defer = 10;
2427
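/*
 * Hold the initial packet of a new state until the peer acknowledges
 * the insert or the deferral timeout fires; returns 1 if the packet
 * was deferred.
 */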
2428int
2429pfsync_defer(struct pf_state *st, struct mbuf *m)
2430{
2431#ifdef __FreeBSD__
2432 struct pfsync_softc *sc = V_pfsyncif;
2433#else
2434 struct pfsync_softc *sc = pfsyncif;
2435#endif
2436 struct pfsync_deferral *pd;
2437
2438#ifdef __FreeBSD__
2439 PF_LOCK_ASSERT();
2440#else
2441 splassert(IPL_SOFTNET);
2442#endif
2443
2444 if (sc->sc_deferred >= 128)
2445 pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
2446
2447 pd = pool_get(&sc->sc_pool, M_NOWAIT);
2448 if (pd == NULL)
2449 return (0);
2450 sc->sc_deferred++;
2451
2452#ifdef __FreeBSD__
2453 m->m_flags |= M_SKIP_FIREWALL;
2454#else
2455 m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2456#endif
2457 SET(st->state_flags, PFSTATE_ACK);
2458
2459 pd->pd_st = st;
2460 pd->pd_m = m;
2461
2462 TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
2463#ifdef __FreeBSD__
2464 callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
2465 callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
2466 pd);
2467#else
2468 timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
2469 timeout_add(&pd->pd_tmo, defer);
2470#endif
2471
2472 return (1);
2473}
2474
2475void
2476pfsync_undefer(struct pfsync_deferral *pd, int drop)
2477{
2478#ifdef __FreeBSD__
2479 struct pfsync_softc *sc = V_pfsyncif;
2480#else
2481 struct pfsync_softc *sc = pfsyncif;
2482#endif
2483 int s;
2484
2485#ifdef __FreeBSD__
2486 PF_LOCK_ASSERT();
2487#else
2488 splassert(IPL_SOFTNET);
2489#endif
2490
2491 TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
2492 sc->sc_deferred--;
2493
2494 CLR(pd->pd_st->state_flags, PFSTATE_ACK);
2495 timeout_del(&pd->pd_tmo); /* bah */
2496 if (drop)
2497 m_freem(pd->pd_m);
2498 else {
2499 s = splnet();
2500#ifdef __FreeBSD__
2501 /* XXX: use pf_defered?! */
2502 PF_UNLOCK();
2503#endif
2504 ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
2505 (void *)NULL, (void *)NULL);
2506#ifdef __FreeBSD__
2507 PF_LOCK();
2508#endif
2509 splx(s);
2510 }
2511
2512 pool_put(&sc->sc_pool, pd);
2513}
2514
2515void
2516pfsync_defer_tmo(void *arg)
2517{
2518#if defined(__FreeBSD__) && defined(VIMAGE)
2519 struct pfsync_deferral *pd = arg;
2520#endif
2521 int s;
2522
2523 s = splsoftnet();
2524#ifdef __FreeBSD__
2525 CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
2526 PF_LOCK();
2527#endif
2528 pfsync_undefer(arg, 0);
2529#ifdef __FreeBSD__
2530 PF_UNLOCK();
2531 CURVNET_RESTORE();
2532#endif
2533 splx(s);
2534}
2535
2536void
2537pfsync_deferred(struct pf_state *st, int drop)
2538{
2539#ifdef __FreeBSD__
2540 struct pfsync_softc *sc = V_pfsyncif;
2541#else
2542 struct pfsync_softc *sc = pfsyncif;
2543#endif
2544 struct pfsync_deferral *pd;
2545
2546 TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
2547 if (pd->pd_st == st) {
2548 pfsync_undefer(pd, drop);
2549 return;
2550 }
2551 }
2552
2553	panic("pfsync_deferred: unable to find deferred state");
2554}
2555
2556u_int pfsync_upds = 0;
2557
2558void
2559pfsync_update_state(struct pf_state *st)
2560{
2561#ifdef __FreeBSD__
2562 struct pfsync_softc *sc = V_pfsyncif;
2563#else
2564 struct pfsync_softc *sc = pfsyncif;
2565#endif
2566 int sync = 0;
2567
2568#ifdef __FreeBSD__
2569 PF_LOCK_ASSERT();
2570#else
2571 splassert(IPL_SOFTNET);
2572#endif
2573
2574 if (sc == NULL)
2575 return;
2576
2577 if (ISSET(st->state_flags, PFSTATE_ACK))
2578 pfsync_deferred(st, 0);
2579 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2580 if (st->sync_state != PFSYNC_S_NONE)
2581 pfsync_q_del(st);
2582 return;
2583 }
2584
2585 if (sc->sc_len == PFSYNC_MINPKT)
2586#ifdef __FreeBSD__
2587 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2588 V_pfsyncif);
2589#else
2590 timeout_add_sec(&sc->sc_tmo, 1);
2591#endif
2592
2593 switch (st->sync_state) {
2594 case PFSYNC_S_UPD_C:
2595 case PFSYNC_S_UPD:
2596 case PFSYNC_S_INS:
2597 /* we're already handling it */
2598
2599 st->sync_updates++;
2600 if (st->sync_updates >= sc->sc_maxupdates)
2601 sync = 1;
2602 break;
2603
2604 case PFSYNC_S_IACK:
2605 pfsync_q_del(st);
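		/* FALLTHROUGH */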
2606 case PFSYNC_S_NONE:
2607 pfsync_q_ins(st, PFSYNC_S_UPD_C);
2608 st->sync_updates = 0;
2609 break;
2610
2611 default:
2612 panic("pfsync_update_state: unexpected sync state %d",
2613 st->sync_state);
2614 }
2615
2616 if (sync || (time_second - st->pfsync_time) < 2) {
2617 pfsync_upds++;
2618#ifdef __FreeBSD__
2619 pfsync_sendout();
2620#else
2621 schednetisr(NETISR_PFSYNC);
2622#endif
2623 }
2624}
2625
2626void
2627pfsync_request_update(u_int32_t creatorid, u_int64_t id)
2628{
2629#ifdef __FreeBSD__
2630 struct pfsync_softc *sc = V_pfsyncif;
2631#else
2632 struct pfsync_softc *sc = pfsyncif;
2633#endif
2634 struct pfsync_upd_req_item *item;
2635 size_t nlen = sizeof(struct pfsync_upd_req);
2636 int s;
2637
2638 PF_LOCK_ASSERT();
2639
2640 /*
2641 * this code does nothing to prevent multiple update requests for the
2642 * same state being generated.
2643 */
2644
2645 item = pool_get(&sc->sc_pool, PR_NOWAIT);
2646 if (item == NULL) {
2647 /* XXX stats */
2648 return;
2649 }
2650
2651 item->ur_msg.id = id;
2652 item->ur_msg.creatorid = creatorid;
2653
2654 if (TAILQ_EMPTY(&sc->sc_upd_req_list))
2655 nlen += sizeof(struct pfsync_subheader);
2656
2657#ifdef __FreeBSD__
2658 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
2659#else
2660 if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2661#endif
2662 s = splnet();
2663 pfsync_sendout();
2664 splx(s);
2665
2666 nlen = sizeof(struct pfsync_subheader) +
2667 sizeof(struct pfsync_upd_req);
2668 }
2669
2670 TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
2671 sc->sc_len += nlen;
2672
2673#ifdef __FreeBSD__
2674 pfsync_sendout();
2675#else
2676 schednetisr(NETISR_PFSYNC);
2677#endif
2678}
2679
2680void
2681pfsync_update_state_req(struct pf_state *st)
2682{
2683#ifdef __FreeBSD__
2684 struct pfsync_softc *sc = V_pfsyncif;
2685#else
2686 struct pfsync_softc *sc = pfsyncif;
2687#endif
2688
2689 PF_LOCK_ASSERT();
2690
2691 if (sc == NULL)
2692		panic("pfsync_update_state_req: nonexistent instance");
2693
2694 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2695 if (st->sync_state != PFSYNC_S_NONE)
2696 pfsync_q_del(st);
2697 return;
2698 }
2699
2700 switch (st->sync_state) {
2701 case PFSYNC_S_UPD_C:
2702 case PFSYNC_S_IACK:
2703 pfsync_q_del(st);
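		/* FALLTHROUGH */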
2704 case PFSYNC_S_NONE:
2705 pfsync_q_ins(st, PFSYNC_S_UPD);
2706#ifdef __FreeBSD__
2707 pfsync_sendout();
2708#else
2709 schednetisr(NETISR_PFSYNC);
2710#endif
2711 return;
2712
2713 case PFSYNC_S_INS:
2714 case PFSYNC_S_UPD:
2715 case PFSYNC_S_DEL:
2716 /* we're already handling it */
2717 return;
2718
2719 default:
2720 panic("pfsync_update_state_req: unexpected sync state %d",
2721 st->sync_state);
2722 }
2723}
2724
2725void
2726pfsync_delete_state(struct pf_state *st)
2727{
2728#ifdef __FreeBSD__
2729 struct pfsync_softc *sc = V_pfsyncif;
2730#else
2731 struct pfsync_softc *sc = pfsyncif;
2732#endif
2733
2734#ifdef __FreeBSD__
2735 PF_LOCK_ASSERT();
2736#else
2737 splassert(IPL_SOFTNET);
2738#endif
2739
2740 if (sc == NULL)
2741 return;
2742
2743 if (ISSET(st->state_flags, PFSTATE_ACK))
2744 pfsync_deferred(st, 1);
2745 if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2746 if (st->sync_state != PFSYNC_S_NONE)
2747 pfsync_q_del(st);
2748 return;
2749 }
2750
2751 if (sc->sc_len == PFSYNC_MINPKT)
2752#ifdef __FreeBSD__
2753 callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2754 V_pfsyncif);
2755#else
2756 timeout_add_sec(&sc->sc_tmo, 1);
2757#endif
2758
2759 switch (st->sync_state) {
2760 case PFSYNC_S_INS:
2761 /* we never got to tell the world so just forget about it */
2762 pfsync_q_del(st);
2763 return;
2764
2765 case PFSYNC_S_UPD_C:
2766 case PFSYNC_S_UPD:
2767 case PFSYNC_S_IACK:
2768 pfsync_q_del(st);
2769 /* FALLTHROUGH to putting it on the del list */
2770
2771 case PFSYNC_S_NONE:
2772 pfsync_q_ins(st, PFSYNC_S_DEL);
2773 return;
2774
2775 default:
2776 panic("pfsync_delete_state: unexpected sync state %d",
2777 st->sync_state);
2778 }
2779}
2780
2781void
2782pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2783{
2784 struct {
2785 struct pfsync_subheader subh;
2786 struct pfsync_clr clr;
2787 } __packed r;
2788
2789#ifdef __FreeBSD__
2790 struct pfsync_softc *sc = V_pfsyncif;
2791#else
2792 struct pfsync_softc *sc = pfsyncif;
2793#endif
2794
2795#ifdef __FreeBSD__
2796 PF_LOCK_ASSERT();
2797#else
2798 splassert(IPL_SOFTNET);
2799#endif
2800
2801 if (sc == NULL)
2802 return;
2803
2804 bzero(&r, sizeof(r));
2805
2806 r.subh.action = PFSYNC_ACT_CLR;
2807 r.subh.count = htons(1);
2808
2809 strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2810 r.clr.creatorid = creatorid;
2811
2812 pfsync_send_plus(&r, sizeof(r));
2813}
2814
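/*
 * Place a state on one of the transmit queues, flushing the pending
 * packet first if adding it would exceed the interface MTU.
 */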
2815void
2816pfsync_q_ins(struct pf_state *st, int q)
2817{
2818#ifdef __FreeBSD__
2819 struct pfsync_softc *sc = V_pfsyncif;
2820#else
2821 struct pfsync_softc *sc = pfsyncif;
2822#endif
2823 size_t nlen = pfsync_qs[q].len;
2824 int s;
2825
2826 PF_LOCK_ASSERT();
2827
2828#ifdef __FreeBSD__
2829 KASSERT(st->sync_state == PFSYNC_S_NONE,
2830 ("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
2831#else
2832 KASSERT(st->sync_state == PFSYNC_S_NONE);
2833#endif
2834
2835#if 1 || defined(PFSYNC_DEBUG)
2836 if (sc->sc_len < PFSYNC_MINPKT)
2837#ifdef __FreeBSD__
2838 panic("pfsync pkt len is too low %zu", sc->sc_len);
2839#else
2840 panic("pfsync pkt len is too low %d", sc->sc_len);
2841#endif
2842#endif
2843 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2844 nlen += sizeof(struct pfsync_subheader);
2845
2846#ifdef __FreeBSD__
2847 if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
2848#else
2849 if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2850#endif
2851 s = splnet();
2852 pfsync_sendout();
2853 splx(s);
2854
2855 nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2856 }
2857
2858 sc->sc_len += nlen;
2859 TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
2860 st->sync_state = q;
2861}
2862
2863void
2864pfsync_q_del(struct pf_state *st)
2865{
2866#ifdef __FreeBSD__
2867 struct pfsync_softc *sc = V_pfsyncif;
2868#else
2869 struct pfsync_softc *sc = pfsyncif;
2870#endif
2871 int q = st->sync_state;
2872
2873#ifdef __FreeBSD__
2874 KASSERT(st->sync_state != PFSYNC_S_NONE,
2875 ("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
2876#else
2877 KASSERT(st->sync_state != PFSYNC_S_NONE);
2878#endif
2879
2880 sc->sc_len -= pfsync_qs[q].len;
2881 TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
2882 st->sync_state = PFSYNC_S_NONE;
2883
2884 if (TAILQ_EMPTY(&sc->sc_qs[q]))
2885 sc->sc_len -= sizeof(struct pfsync_subheader);
2886}
2887
2888#ifdef notyet
2889void
2890pfsync_update_tdb(struct tdb *t, int output)
2891{
2892#ifdef __FreeBSD__
2893 struct pfsync_softc *sc = V_pfsyncif;
2894#else
2895 struct pfsync_softc *sc = pfsyncif;
2896#endif
2897 size_t nlen = sizeof(struct pfsync_tdb);
2898 int s;
2899
2900 if (sc == NULL)
2901 return;
2902
2903 if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
2904 if (TAILQ_EMPTY(&sc->sc_tdb_q))
2905 nlen += sizeof(struct pfsync_subheader);
2906
2907 if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2908 s = splnet();
2909 PF_LOCK();
2910 pfsync_sendout();
2911 PF_UNLOCK();
2912 splx(s);
2913
2914 nlen = sizeof(struct pfsync_subheader) +
2915 sizeof(struct pfsync_tdb);
2916 }
2917
2918 sc->sc_len += nlen;
2919 TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
2920 SET(t->tdb_flags, TDBF_PFSYNC);
2921 t->tdb_updates = 0;
2922 } else {
2923 if (++t->tdb_updates >= sc->sc_maxupdates)
2924 schednetisr(NETISR_PFSYNC);
2925 }
2926
2927 if (output)
2928 SET(t->tdb_flags, TDBF_PFSYNC_RPL);
2929 else
2930 CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
2931}
2932
2933void
2934pfsync_delete_tdb(struct tdb *t)
2935{
2936#ifdef __FreeBSD__
2937 struct pfsync_softc *sc = V_pfsyncif;
2938#else
2939 struct pfsync_softc *sc = pfsyncif;
2940#endif
2941
2942 if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
2943 return;
2944
2945 sc->sc_len -= sizeof(struct pfsync_tdb);
2946 TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
2947 CLR(t->tdb_flags, TDBF_PFSYNC);
2948
2949 if (TAILQ_EMPTY(&sc->sc_tdb_q))
2950 sc->sc_len -= sizeof(struct pfsync_subheader);
2951}
2952
2953int
2954pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
2955{
2956 struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);
2957
2958 bzero(ut, sizeof(*ut));
2959 ut->spi = t->tdb_spi;
2960 bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
2961 /*
2962 * When a failover happens, the master's rpl is probably above
2963 * what we see here (we may be up to a second late), so
2964 * increase it a bit for outbound tdbs to manage most such
2965 * situations.
2966 *
2967 * For now, just add an offset that is likely to be larger
2968 * than the number of packets we can see in one second. The RFC
2969 * just says the next packet must have a higher seq value.
2970 *
2971 * XXX What is a good algorithm for this? We could use
2972 * a rate-determined increase, but to know it, we would have
2973 * to extend struct tdb.
2974 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
2975 * will soon be replaced anyway. For now, just don't handle
2976 * this edge case.
2977 */
2978#define RPL_INCR 16384
2979 ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
2980 RPL_INCR : 0));
2981 ut->cur_bytes = htobe64(t->tdb_cur_bytes);
2982 ut->sproto = t->tdb_sproto;
2983
2984 return (sizeof(*ut));
2985}
2986#endif
2987
2988void
2989pfsync_bulk_start(void)
2990{
2991#ifdef __FreeBSD__
2992 struct pfsync_softc *sc = V_pfsyncif;
2993#else
2994 struct pfsync_softc *sc = pfsyncif;
2995#endif
2996
2997#ifdef __FreeBSD__
2998 if (V_pf_status.debug >= PF_DEBUG_MISC)
2999#else
3000 if (pf_status.debug >= PF_DEBUG_MISC)
3001#endif
3002 printf("pfsync: received bulk update request\n");
3003
3004#ifdef __FreeBSD__
3005 PF_LOCK();
3006 if (TAILQ_EMPTY(&V_state_list))
3007#else
3008 if (TAILQ_EMPTY(&state_list))
3009#endif
3010 pfsync_bulk_status(PFSYNC_BUS_END);
3011 else {
3012 sc->sc_ureq_received = time_uptime;
3013 if (sc->sc_bulk_next == NULL)
3014#ifdef __FreeBSD__
3015 sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
3016#else
3017 sc->sc_bulk_next = TAILQ_FIRST(&state_list);
3018#endif
3019 sc->sc_bulk_last = sc->sc_bulk_next;
3020
3021 pfsync_bulk_status(PFSYNC_BUS_START);
3022 callout_reset(&sc->sc_bulk_tmo, 1,
3023 pfsync_bulk_update, sc);
3024 }
3025#ifdef __FreeBSD__
3026 PF_UNLOCK();
3027#endif
3028}
3029
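/*
 * Walk the state list sending full updates, rescheduling ourselves
 * whenever the packet fills, until we wrap around to where the bulk
 * update started.
 */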
3030void
3031pfsync_bulk_update(void *arg)
3032{
3033 struct pfsync_softc *sc = arg;
3034 struct pf_state *st = sc->sc_bulk_next;
3035 int i = 0;
3036 int s;
3037
3038 PF_LOCK_ASSERT();
3039
3040 s = splsoftnet();
3041#ifdef __FreeBSD__
3042 CURVNET_SET(sc->sc_ifp->if_vnet);
3043#endif
3044 for (;;) {
3045 if (st->sync_state == PFSYNC_S_NONE &&
3046 st->timeout < PFTM_MAX &&
3047 st->pfsync_time <= sc->sc_ureq_received) {
3048 pfsync_update_state_req(st);
3049 i++;
3050 }
3051
3052 st = TAILQ_NEXT(st, entry_list);
3053 if (st == NULL)
3054#ifdef __FreeBSD__
3055 st = TAILQ_FIRST(&V_state_list);
3056#else
3057 st = TAILQ_FIRST(&state_list);
3058#endif
3059
3060 if (st == sc->sc_bulk_last) {
3061 /* we're done */
3062 sc->sc_bulk_next = NULL;
3063 sc->sc_bulk_last = NULL;
3064 pfsync_bulk_status(PFSYNC_BUS_END);
3065 break;
3066 }
3067
3068#ifdef __FreeBSD__
3069 if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
3070#else
3071 if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
3072#endif
3073 sizeof(struct pfsync_state)) {
3074 /* we've filled a packet */
3075 sc->sc_bulk_next = st;
3076#ifdef __FreeBSD__
3077 callout_reset(&sc->sc_bulk_tmo, 1,
3078 pfsync_bulk_update, sc);
3079#else
3080 timeout_add(&sc->sc_bulk_tmo, 1);
3081#endif
3082 break;
3083 }
3084 }
3085
3086#ifdef __FreeBSD__
3087 CURVNET_RESTORE();
3088#endif
3089 splx(s);
3090}
3091
3092void
3093pfsync_bulk_status(u_int8_t status)
3094{
3095 struct {
3096 struct pfsync_subheader subh;
3097 struct pfsync_bus bus;
3098 } __packed r;
3099
3100#ifdef __FreeBSD__
3101 struct pfsync_softc *sc = V_pfsyncif;
3102#else
3103 struct pfsync_softc *sc = pfsyncif;
3104#endif
3105
3106 PF_LOCK_ASSERT();
3107
3108 bzero(&r, sizeof(r));
3109
3110 r.subh.action = PFSYNC_ACT_BUS;
3111 r.subh.count = htons(1);
3112
3113#ifdef __FreeBSD__
3114 r.bus.creatorid = V_pf_status.hostid;
3115#else
3116 r.bus.creatorid = pf_status.hostid;
3117#endif
3118 r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
3119 r.bus.status = status;
3120
3121 pfsync_send_plus(&r, sizeof(r));
3122}
3123
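/*
 * Bulk update timed out: retry up to PFSYNC_MAX_BULKTRIES times, then
 * give up and carry on as if the transfer had succeeded.
 */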
3124void
3125pfsync_bulk_fail(void *arg)
3126{
3127 struct pfsync_softc *sc = arg;
3128
3129#ifdef __FreeBSD__
3130 CURVNET_SET(sc->sc_ifp->if_vnet);
3131#endif
3132
3133 if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
3134 /* Try again */
3135#ifdef __FreeBSD__
3136 callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
3137 pfsync_bulk_fail, V_pfsyncif);
3138#else
3139 timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
3140#endif
3141 PF_LOCK();
3142 pfsync_request_update(0, 0);
3143 PF_UNLOCK();
3144 } else {
3145 /* Pretend like the transfer was ok */
3146 sc->sc_ureq_sent = 0;
3147 sc->sc_bulk_tries = 0;
3148#ifdef __FreeBSD__
3149 if (!sc->pfsync_sync_ok && carp_demote_adj_p)
3150 (*carp_demote_adj_p)(-V_pfsync_carp_adj,
3151 "pfsync bulk fail");
3152 sc->pfsync_sync_ok = 1;
3153#else
3154#if NCARP > 0
3155 if (!pfsync_sync_ok)
3156 carp_group_demote_adj(&sc->sc_if, -1);
3157#endif
3158 pfsync_sync_ok = 1;
3159#endif
3160#ifdef __FreeBSD__
3161 if (V_pf_status.debug >= PF_DEBUG_MISC)
3162#else
3163 if (pf_status.debug >= PF_DEBUG_MISC)
3164#endif
3165 printf("pfsync: failed to receive bulk update\n");
3166 }
3167
3168#ifdef __FreeBSD__
3169 CURVNET_RESTORE();
3170#endif
3171}
3172
3173void
3174pfsync_send_plus(void *plus, size_t pluslen)
3175{
3176#ifdef __FreeBSD__
3177 struct pfsync_softc *sc = V_pfsyncif;
3178#else
3179 struct pfsync_softc *sc = pfsyncif;
3180#endif
3181 int s;
3182
3183 PF_LOCK_ASSERT();
3184
3185#ifdef __FreeBSD__
3186 if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
3187#else
3188 if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
3189#endif
3190 s = splnet();
3191 pfsync_sendout();
3192 splx(s);
3193 }
3194
3195 sc->sc_plus = plus;
3196 sc->sc_len += (sc->sc_pluslen = pluslen);
3197
3198 s = splnet();
3199 pfsync_sendout();
3200 splx(s);
3201}
3202
3203int
3204pfsync_up(void)
3205{
3206#ifdef __FreeBSD__
3207 struct pfsync_softc *sc = V_pfsyncif;
3208#else
3209 struct pfsync_softc *sc = pfsyncif;
3210#endif
3211
3212#ifdef __FreeBSD__
3213 if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
3214#else
3215 if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
3216#endif
3217 return (0);
3218
3219 return (1);
3220}
3221
3222int
3223pfsync_state_in_use(struct pf_state *st)
3224{
3225#ifdef __FreeBSD__
3226 struct pfsync_softc *sc = V_pfsyncif;
3227#else
3228 struct pfsync_softc *sc = pfsyncif;
3229#endif
3230
3231 if (sc == NULL)
3232 return (0);
3233
3234 if (st->sync_state != PFSYNC_S_NONE ||
3235 st == sc->sc_bulk_next ||
3236 st == sc->sc_bulk_last)
3237 return (1);
3238
3239 return (0);
3240}
3241
3242u_int pfsync_ints;
3243u_int pfsync_tmos;
3244
3245void
3246pfsync_timeout(void *arg)
3247{
3248#if defined(__FreeBSD__) && defined(VIMAGE)
3249 struct pfsync_softc *sc = arg;
3250#endif
3251 int s;
3252
3253#ifdef __FreeBSD__
3254 CURVNET_SET(sc->sc_ifp->if_vnet);
3255#endif
3256
3257 pfsync_tmos++;
3258
3259 s = splnet();
3260#ifdef __FreeBSD__
3261 PF_LOCK();
3262#endif
3263 pfsync_sendout();
3264#ifdef __FreeBSD__
3265 PF_UNLOCK();
3266#endif
3267 splx(s);
3268
3269#ifdef __FreeBSD__
3270 CURVNET_RESTORE();
3271#endif
3272}
3273
3274/* this is a softnet/netisr handler */
3275void
3276#ifdef __FreeBSD__
3277pfsyncintr(void *arg)
3278{
3279 struct pfsync_softc *sc = arg;
3280 struct mbuf *m, *n;
3281
3282 CURVNET_SET(sc->sc_ifp->if_vnet);
3283 pfsync_ints++;
3284
3285 IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);
3286
3287 for (; m != NULL; m = n) {
3288
3289 n = m->m_nextpkt;
3290 m->m_nextpkt = NULL;
3291 if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
3292 == 0)
3293 V_pfsyncstats.pfsyncs_opackets++;
3294 else
3295 V_pfsyncstats.pfsyncs_oerrors++;
3296 }
3297 CURVNET_RESTORE();
3298}
3299#else
3300pfsyncintr(void)
3301{
3302 int s;
3303
3304 pfsync_ints++;
3305
3306 s = splnet();
3307 pfsync_sendout();
3308 splx(s);
3309}
3310#endif
3311
3312int
3313pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
3314 size_t newlen)
3315{
3316
3317#ifdef notyet
3318 /* All sysctl names at this level are terminal. */
3319 if (namelen != 1)
3320 return (ENOTDIR);
3321
3322 switch (name[0]) {
3323 case PFSYNCCTL_STATS:
3324 if (newp != NULL)
3325 return (EPERM);
3326 return (sysctl_struct(oldp, oldlenp, newp, newlen,
3327 &V_pfsyncstats, sizeof(V_pfsyncstats)));
3328 }
3329#endif
3330 return (ENOPROTOOPT);
3331}
3332
3333#ifdef __FreeBSD__
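/*
 * Interface departure event handler: if the departing interface is our
 * sync interface, forget about it and release the multicast membership
 * that was hanging off it.
 */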
3334void
3335pfsync_ifdetach(void *arg, struct ifnet *ifp)
3336{
3337 struct pfsync_softc *sc = (struct pfsync_softc *)arg;
3338 struct ip_moptions *imo;
3339
3340 if (sc == NULL || sc->sc_sync_if != ifp)
3341 return; /* not for us; unlocked read */
3342
3343 CURVNET_SET(sc->sc_ifp->if_vnet);
3344
3345 PF_LOCK();
3346
3347 /* Deal with a member interface going away from under us. */
3348 sc->sc_sync_if = NULL;
3349 imo = &sc->sc_imo;
3350 if (imo->imo_num_memberships > 0) {
3351 KASSERT(imo->imo_num_memberships == 1,
3352 ("%s: imo_num_memberships != 1", __func__));
3353 /*
3354 * Our event handler is always called after protocol
3355 * domains have been detached from the underlying ifnet.
3356 * Do not call in_delmulti(); we held a single reference
3357 * which the protocol domain has purged in in_purgemaddrs().
3358 */
3359 PF_UNLOCK();
3360 imo->imo_membership[--imo->imo_num_memberships] = NULL;
3361 PF_LOCK();
3362 imo->imo_multicast_ifp = NULL;
3363 }
3364
3365 PF_UNLOCK();
3366
3367 CURVNET_RESTORE();
3368}
3369
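/*
 * Per-vnet startup: create the pfsync interface, register the software
 * interrupt handler and install the hooks through which pf calls into
 * pfsync.
 */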
3370static int
3371vnet_pfsync_init(const void *unused)
3372{
3373 int error = 0;
3374
3375 pfsyncattach(0);
3376
3377 error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
3378 SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie);
3379 if (error)
3380 panic("%s: swi_add %d", __func__, error);
3381
3382 PF_LOCK();
3383 pfsync_state_import_ptr = pfsync_state_import;
3384 pfsync_up_ptr = pfsync_up;
3385 pfsync_insert_state_ptr = pfsync_insert_state;
3386 pfsync_update_state_ptr = pfsync_update_state;
3387 pfsync_delete_state_ptr = pfsync_delete_state;
3388 pfsync_clear_states_ptr = pfsync_clear_states;
3389 pfsync_state_in_use_ptr = pfsync_state_in_use;
3390 pfsync_defer_ptr = pfsync_defer;
3391 PF_UNLOCK();
3392
3393 return (0);
3394}
3395
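/*
 * Per-vnet teardown: remove the software interrupt handler, clear the
 * pf hooks and detach the interface cloner.
 */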
3396static int
3397vnet_pfsync_uninit(const void *unused)
3398{
3399
3400 swi_remove(pfsync_swi.pfsync_swi_cookie);
3401
3402 PF_LOCK();
3403 pfsync_state_import_ptr = NULL;
3404 pfsync_up_ptr = NULL;
3405 pfsync_insert_state_ptr = NULL;
3406 pfsync_update_state_ptr = NULL;
3407 pfsync_delete_state_ptr = NULL;
3408 pfsync_clear_states_ptr = NULL;
3409 pfsync_state_in_use_ptr = NULL;
3410 pfsync_defer_ptr = NULL;
3411 PF_UNLOCK();
3412
3413 if_clone_detach(&pfsync_cloner);
3414
3415 return (0);
3416}
3417
3418/* Define startup order. */
3419#define PFSYNC_SYSINIT_ORDER SI_SUB_PROTO_IF
3420#define PFSYNC_MODEVENT_ORDER (SI_ORDER_FIRST) /* Module event handler slots in here at boot. */
3421#define PFSYNC_VNET_ORDER (PFSYNC_MODEVENT_ORDER + 2) /* Later still. */
3422
3423/*
3424 * Starting up.
3425 * VNET_SYSINIT is called for each existing vnet and each new vnet.
3426 */
3427VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
3428 vnet_pfsync_init, NULL);
3429
3430/*
3431 * Closing up shop. These are done in REVERSE ORDER.
3432 * Not called on reboot.
3433 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
3434 */
3435VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
3436 vnet_pfsync_uninit, NULL);
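
/*
 * Module event handler.  On FreeBSD the real work happens in the
 * VNET_SYSINIT/VNET_SYSUNINIT handlers above, so load and unload are
 * no-ops here.
 */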
3437static int
3438pfsync_modevent(module_t mod, int type, void *data)
3439{
3440 int error = 0;
3441
3442 switch (type) {
3443 case MOD_LOAD:
3444#ifndef __FreeBSD__
3445 pfsyncattach(0);
3446#endif
3447 break;
3448 case MOD_UNLOAD:
3449#ifndef __FreeBSD__
3450 if_clone_detach(&pfsync_cloner);
3451#endif
3452 break;
3453 default:
3454 error = EINVAL;
3455 break;
3456 }
3457
3458	return (error);
3459}
3460
3461static moduledata_t pfsync_mod = {
3462 "pfsync",
3463 pfsync_modevent,
3464 0
3465};
3466
3467#define PFSYNC_MODVER 1
3468
3469DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
3470MODULE_VERSION(pfsync, PFSYNC_MODVER);
3471MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
3472#endif /* __FreeBSD__ */