if_pfsync.c revision 225736
1/*	$OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2002 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45#ifdef __FreeBSD__
46#include "opt_inet.h"
47#include "opt_inet6.h"
48#include "opt_bpf.h"
49#include "opt_pf.h"
50
51#include <sys/cdefs.h>
52__FBSDID("$FreeBSD: stable/9/sys/contrib/pf/net/if_pfsync.c 224936 2011-08-17 13:02:50Z pluknet $");
53
54#ifdef DEV_BPF
55#define	NBPFILTER	DEV_BPF
56#else
57#define	NBPFILTER	0
58#endif
59
60#ifdef DEV_PFSYNC
61#define	NPFSYNC		DEV_PFSYNC
62#else
63#define	NPFSYNC		0
64#endif
65
66#ifdef DEV_CARP
67#define	NCARP		DEV_CARP
68#else
69#define	NCARP		0
70#endif
71#endif /* __FreeBSD__ */
72
73#include <sys/param.h>
74#include <sys/kernel.h>
75#ifdef __FreeBSD__
76#include <sys/bus.h>
77#include <sys/interrupt.h>
78#include <sys/priv.h>
79#endif
80#include <sys/proc.h>
81#include <sys/systm.h>
82#include <sys/time.h>
83#include <sys/mbuf.h>
84#include <sys/socket.h>
85#ifdef __FreeBSD__
86#include <sys/endian.h>
87#include <sys/malloc.h>
88#include <sys/module.h>
89#include <sys/sockio.h>
90#include <sys/taskqueue.h>
91#include <sys/lock.h>
92#include <sys/mutex.h>
93#else
94#include <sys/ioctl.h>
95#include <sys/timeout.h>
96#endif
97#include <sys/sysctl.h>
98#ifndef __FreeBSD__
99#include <sys/pool.h>
100#endif
101
102#include <net/if.h>
103#ifdef __FreeBSD__
104#include <net/if_clone.h>
105#endif
106#include <net/if_types.h>
107#include <net/route.h>
108#include <net/bpf.h>
109#include <net/netisr.h>
110#ifdef __FreeBSD__
111#include <net/vnet.h>
112#endif
113
114#include <netinet/in.h>
115#include <netinet/if_ether.h>
116#include <netinet/tcp.h>
117#include <netinet/tcp_seq.h>
118
119#ifdef	INET
120#include <netinet/in_systm.h>
121#include <netinet/in_var.h>
122#include <netinet/ip.h>
123#include <netinet/ip_var.h>
124#endif
125
126#ifdef INET6
127#include <netinet6/nd6.h>
128#endif /* INET6 */
129
130#ifndef __FreeBSD__
131#include "carp.h"
132#endif
133#if NCARP > 0
134#include <netinet/ip_carp.h>
135#endif
136
137#include <net/pfvar.h>
138#include <net/if_pfsync.h>
139
140#ifndef __FreeBSD__
141#include "bpfilter.h"
142#include "pfsync.h"
143#endif
144
/*
 * Smallest valid pfsync packet: an IP header, the pfsync header, plus a
 * single subheader carrying only the EOF marker.  Used as the baseline
 * for sc_len accounting.
 */
#define PFSYNC_MINPKT ( \
	sizeof(struct ip) + \
	sizeof(struct pfsync_header) + \
	sizeof(struct pfsync_subheader) + \
	sizeof(struct pfsync_eof))

/* Per-packet context handed to each pfsync_in_*() input handler. */
struct pfsync_pkt {
	struct ip *ip;		/* IP header of the received packet */
	struct in_addr src;	/* copy of ip->ip_src */
	u_int8_t flags;		/* PFSYNC_SI_* flags (e.g. PFSYNC_SI_CKSUM) */
};
156
/* HMAC verification of incoming packets (call site is under "#if 0" below). */
int	pfsync_input_hmac(struct mbuf *, int);

/* Sanity-check a TCP state update; non-zero result means the peer is stale. */
int	pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
	    struct pfsync_state_peer *);

/*
 * Input handlers, one per PFSYNC_ACT_* message type.  Each processes
 * "count" messages starting at "offset" into the mbuf and returns the
 * number of bytes consumed, or -1 on a fatal parse error (in which
 * case pfsync_input() must not touch the mbuf again).
 */
int	pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
int	pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);

/* Handler for action codes this implementation does not accept. */
int	pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);

/* Dispatch table, indexed by the PFSYNC_ACT_* code from the subheader. */
int	(*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
	pfsync_in_ins,			/* PFSYNC_ACT_INS */
	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
	pfsync_in_del,			/* PFSYNC_ACT_DEL */
	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
	pfsync_in_eof			/* PFSYNC_ACT_EOF */
};
191
/*
 * Descriptor for one outgoing state queue: the serializer callback,
 * the on-wire size of a single message, and the action code placed in
 * the subheader when the queue is flushed.
 */
struct pfsync_q {
	int		(*write)(struct pf_state *, struct mbuf *, int);
	size_t		len;
	u_int8_t	action;
};

/* we have one of these for every PFSYNC_S_ */
int	pfsync_out_state(struct pf_state *, struct mbuf *, int);
int	pfsync_out_iack(struct pf_state *, struct mbuf *, int);
int	pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
int	pfsync_out_del(struct pf_state *, struct mbuf *, int);

/* Outgoing queue table, indexed by PFSYNC_S_* queue number. */
struct pfsync_q pfsync_qs[] = {
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
};

void	pfsync_q_ins(struct pf_state *, int);
void	pfsync_q_del(struct pf_state *);

/* A pending "update request" message queued for transmission. */
struct pfsync_upd_req_item {
	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
	struct pfsync_upd_req			ur_msg;
};
TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);

/* A packet held back until the peer acknowledges the state insert. */
struct pfsync_deferral {
	TAILQ_ENTRY(pfsync_deferral)		 pd_entry;
	struct pf_state				*pd_st;
	struct mbuf				*pd_m;
#ifdef __FreeBSD__
	struct callout				 pd_tmo;
#else
	struct timeout				 pd_tmo;
#endif
};
TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);

/* Both item types above are allocated from the same fixed-size pool. */
#define PFSYNC_PLSIZE	MAX(sizeof(struct pfsync_upd_req_item), \
			    sizeof(struct pfsync_deferral))

#ifdef notyet
int	pfsync_out_tdb(struct tdb *, struct mbuf *, int);
#endif
239
/* Per-interface state for the (single) pfsync clone. */
struct pfsync_softc {
#ifdef __FreeBSD__
	struct ifnet		*sc_ifp;	/* our pseudo-interface */
#else
	struct ifnet		 sc_if;
#endif
	struct ifnet		*sc_sync_if;	/* physical syncdev, if set */

#ifdef __FreeBSD__
	uma_zone_t		 sc_pool;	/* deferrals + update requests */
#else
	struct pool		 sc_pool;
#endif

	struct ip_moptions	 sc_imo;	/* multicast membership state */

	struct in_addr		 sc_sync_peer;	/* unicast syncpeer address */
	u_int8_t		 sc_maxupdates;	/* max UPD_C per state before
						 * sending a full update */
#ifdef __FreeBSD__
	int			 pfsync_sync_ok; /* bulk transfer completed */
#endif

	struct ip		 sc_template;	/* prebuilt IP header */

	/* one queue of dirty states per PFSYNC_S_* message class */
	struct pf_state_queue	 sc_qs[PFSYNC_S_COUNT];
	size_t			 sc_len;	/* bytes queued for next packet */

	struct pfsync_upd_reqs	 sc_upd_req_list;

	struct pfsync_deferrals	 sc_deferrals;	/* packets awaiting peer ack */
	u_int			 sc_deferred;	/* count of the above */

	void			*sc_plus;	/* opaque extra payload */
	size_t			 sc_pluslen;

	u_int32_t		 sc_ureq_sent;	/* when we asked for a bulk xfer */
	int			 sc_bulk_tries;
#ifdef __FreeBSD__
	struct callout		 sc_bulkfail_tmo;
#else
	struct timeout		 sc_bulkfail_tmo;
#endif

	u_int32_t		 sc_ureq_received; /* when a peer asked us */
	struct pf_state		*sc_bulk_next;	/* bulk-send cursor */
	struct pf_state		*sc_bulk_last;	/* bulk-send end marker */
#ifdef __FreeBSD__
	struct callout		 sc_bulk_tmo;
#else
	struct timeout		 sc_bulk_tmo;
#endif

	TAILQ_HEAD(, tdb)	 sc_tdb_q;	/* dirty IPsec TDBs */

#ifdef __FreeBSD__
	struct callout		 sc_tmo;	/* periodic flush timeout */
#else
	struct timeout		 sc_tmo;
#endif
#ifdef __FreeBSD__
	eventhandler_tag	 sc_detachtag;	/* ifnet departure hook */
#endif

};
304
#ifdef __FreeBSD__
/* Softc pointer and statistics are per-vnet on FreeBSD. */
static VNET_DEFINE(struct pfsync_softc	*, pfsyncif) = NULL;
#define	V_pfsyncif		VNET(pfsyncif)

static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
#define	V_pfsyncstats		VNET(pfsyncstats)

SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
    &VNET_NAME(pfsyncstats), pfsyncstats,
    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
#else
struct pfsync_softc	*pfsyncif = NULL;
struct pfsyncstats	 pfsyncstats;
/* alias so shared code can use the V_ spelling on both platforms */
#define	V_pfsyncstats	 pfsyncstats
#endif

#ifdef __FreeBSD__
/*
 * FreeBSD has no netisr slot for pfsync; emulate OpenBSD's
 * schednetisr(NETISR_PFSYNC) with a software interrupt.
 */
static void	pfsyncintr(void *);
struct pfsync_swi {
	void *	pfsync_swi_cookie;
};
static struct pfsync_swi	 pfsync_swi;
#define	schednetisr(p)	swi_sched(pfsync_swi.pfsync_swi_cookie, 0)
#define	NETISR_PFSYNC
#endif
331
void	pfsyncattach(int);
#ifdef __FreeBSD__
int	pfsync_clone_create(struct if_clone *, int, caddr_t);
void	pfsync_clone_destroy(struct ifnet *);
#else
int	pfsync_clone_create(struct if_clone *, int);
int	pfsync_clone_destroy(struct ifnet *);
#endif
int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
	    struct pf_state_peer *);
void	pfsync_update_net_tdb(struct pfsync_tdb *);
int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
#ifdef __FreeBSD__
	    struct route *);
#else
	    struct rtentry *);
#endif
int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
void	pfsyncstart(struct ifnet *);

struct mbuf *pfsync_if_dequeue(struct ifnet *);
struct mbuf *pfsync_get_mbuf(struct pfsync_softc *);

/* deferral handling (packets held until the peer acks the insert) */
void	pfsync_deferred(struct pf_state *, int);
void	pfsync_undefer(struct pfsync_deferral *, int);
void	pfsync_defer_tmo(void *);

void	pfsync_request_update(u_int32_t, u_int64_t);
void	pfsync_update_state_req(struct pf_state *);

/* packet construction and transmission */
void	pfsync_drop(struct pfsync_softc *);
void	pfsync_sendout(void);
void	pfsync_send_plus(void *, size_t);
int	pfsync_tdb_sendout(struct pfsync_softc *);
int	pfsync_sendout_mbuf(struct pfsync_softc *, struct mbuf *);
void	pfsync_timeout(void *);
void	pfsync_tdb_timeout(void *);
void	pfsync_send_bus(struct pfsync_softc *, u_int8_t);

/* bulk state transfer */
void	pfsync_bulk_start(void);
void	pfsync_bulk_status(u_int8_t);
void	pfsync_bulk_update(void *);
void	pfsync_bulk_fail(void *);

#ifdef __FreeBSD__
void	pfsync_ifdetach(void *, struct ifnet *);

/* XXX: ugly — map OpenBSD names onto FreeBSD equivalents */
#define	betoh64		(unsigned long long)be64toh
#define	timeout_del	callout_stop
#endif

#define PFSYNC_MAX_BULKTRIES	12
#ifndef __FreeBSD__
int	pfsync_sync_ok;
#endif

#ifdef __FreeBSD__
IFC_SIMPLE_DECLARE(pfsync, 1);
#else
struct if_clone	pfsync_cloner =
    IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
#endif
395
/*
 * Register the pfsync interface cloner.  Called once at attach time;
 * the unit-count argument is unused.
 */
void
pfsyncattach(int npfsync)
{
	if_clone_attach(&pfsync_cloner);
}
401int
402#ifdef __FreeBSD__
403pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
404#else
405pfsync_clone_create(struct if_clone *ifc, int unit)
406#endif
407{
408	struct pfsync_softc *sc;
409	struct ifnet *ifp;
410	int q;
411
412	if (unit != 0)
413		return (EINVAL);
414
415#ifndef __FreeBSD__
416	pfsync_sync_ok = 1;
417#endif
418
419	sc = malloc(sizeof(struct pfsync_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
420	if (sc == NULL)
421		return (ENOMEM);
422
423	for (q = 0; q < PFSYNC_S_COUNT; q++)
424		TAILQ_INIT(&sc->sc_qs[q]);
425
426#ifdef __FreeBSD__
427	sc->pfsync_sync_ok = 1;
428	sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE,
429			NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
430	if (sc->sc_pool == NULL) {
431		free(sc, M_DEVBUF);
432		return (ENOMEM);
433	}
434#else
435	pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
436#endif
437	TAILQ_INIT(&sc->sc_upd_req_list);
438	TAILQ_INIT(&sc->sc_deferrals);
439	sc->sc_deferred = 0;
440
441	TAILQ_INIT(&sc->sc_tdb_q);
442
443	sc->sc_len = PFSYNC_MINPKT;
444	sc->sc_maxupdates = 128;
445
446#ifdef __FreeBSD__
447	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
448	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_DEVBUF,
449	    M_NOWAIT | M_ZERO);
450	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
451	sc->sc_imo.imo_multicast_vif = -1;
452#else
453	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
454	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
455	    M_WAITOK | M_ZERO);
456	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
457#endif
458
459#ifdef __FreeBSD__
460	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
461	if (ifp == NULL) {
462		free(sc->sc_imo.imo_membership, M_DEVBUF);
463		uma_zdestroy(sc->sc_pool);
464		free(sc, M_DEVBUF);
465		return (ENOSPC);
466	}
467	if_initname(ifp, ifc->ifc_name, unit);
468
469	sc->sc_detachtag = EVENTHANDLER_REGISTER(ifnet_departure_event,
470#ifdef __FreeBSD__
471	    pfsync_ifdetach, V_pfsyncif, EVENTHANDLER_PRI_ANY);
472#else
473	    pfsync_ifdetach, pfsyncif, EVENTHANDLER_PRI_ANY);
474#endif
475	if (sc->sc_detachtag == NULL) {
476		if_free(ifp);
477		free(sc->sc_imo.imo_membership, M_DEVBUF);
478		uma_zdestroy(sc->sc_pool);
479		free(sc, M_DEVBUF);
480		return (ENOSPC);
481	}
482#else
483	ifp = &sc->sc_if;
484	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
485#endif
486	ifp->if_softc = sc;
487	ifp->if_ioctl = pfsyncioctl;
488	ifp->if_output = pfsyncoutput;
489	ifp->if_start = pfsyncstart;
490	ifp->if_type = IFT_PFSYNC;
491	ifp->if_snd.ifq_maxlen = ifqmaxlen;
492	ifp->if_hdrlen = sizeof(struct pfsync_header);
493	ifp->if_mtu = 1500; /* XXX */
494#ifdef __FreeBSD__
495	callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
496	callout_init(&sc->sc_bulk_tmo, CALLOUT_MPSAFE);
497	callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
498#else
499	ifp->if_hardmtu = MCLBYTES; /* XXX */
500	timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
501	timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
502	timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
503#endif
504
505	if_attach(ifp);
506#ifndef __FreeBSD__
507	if_alloc_sadl(ifp);
508#endif
509
510#if NCARP > 0
511	if_addgroup(ifp, "carp");
512#endif
513
514#if NBPFILTER > 0
515#ifdef __FreeBSD__
516	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
517#else
518	bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
519#endif
520#endif
521
522#ifdef __FreeBSD__
523	V_pfsyncif = sc;
524#else
525	pfsyncif = sc;
526#endif
527
528	return (0);
529}
530
/*
 * Tear down the pfsync clone interface: cancel timeouts, detach from
 * bpf and the ifnet layer, drop any queued state messages and pending
 * deferrals, then release the pool, multicast memberships and softc.
 * On OpenBSD the cloner expects an int return; FreeBSD's expects void.
 */
#ifdef __FreeBSD__
void
#else
int
#endif
pfsync_clone_destroy(struct ifnet *ifp)
{
	struct pfsync_softc *sc = ifp->if_softc;

#ifdef __FreeBSD__
	EVENTHANDLER_DEREGISTER(ifnet_departure_event, sc->sc_detachtag);
#endif
	/* stop periodic and bulk-transfer callouts before freeing state */
	timeout_del(&sc->sc_bulk_tmo);
	timeout_del(&sc->sc_tmo);
#if NCARP > 0
#ifdef notyet
#ifdef __FreeBSD__
	if (!sc->pfsync_sync_ok)
#else
	if (!pfsync_sync_ok)
#endif
		carp_group_demote_adj(&sc->sc_if, -1);
#endif
#endif
#if NBPFILTER > 0
	bpfdetach(ifp);
#endif
	if_detach(ifp);

	/* discard any state messages still queued for transmission */
	pfsync_drop(sc);

	/* release deferred packets without sending them (drop == 0 arg) */
	while (sc->sc_deferred > 0)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

#ifdef __FreeBSD__
	UMA_DESTROY(sc->sc_pool);
#else
	pool_destroy(&sc->sc_pool);
#endif
#ifdef __FreeBSD__
	if_free(ifp);
	free(sc->sc_imo.imo_membership, M_DEVBUF);
#else
	free(sc->sc_imo.imo_membership, M_IPMOPTS);
#endif
	free(sc, M_DEVBUF);

#ifdef __FreeBSD__
	V_pfsyncif = NULL;
#else
	pfsyncif = NULL;
#endif

#ifndef __FreeBSD__
	return (0);
#endif
}
588
/*
 * Dequeue one mbuf from the interface send queue, or NULL if empty.
 * On FreeBSD the drop counter is bumped here under the queue lock
 * (pfsyncstart discards everything it dequeues); on OpenBSD the
 * caller does the IF_DROP accounting instead.
 */
struct mbuf *
pfsync_if_dequeue(struct ifnet *ifp)
{
	struct mbuf *m;
#ifndef __FreeBSD__
	int s;
#endif

#ifdef __FreeBSD__
	IF_LOCK(&ifp->if_snd);
	_IF_DROP(&ifp->if_snd);
	_IF_DEQUEUE(&ifp->if_snd, m);
	IF_UNLOCK(&ifp->if_snd);
#else
	s = splnet();
	IF_DEQUEUE(&ifp->if_snd, m);
	splx(s);
#endif

	return (m);
}
610
/*
 * Start output on the pfsync interface.
 *
 * pfsync transmits via pfsync_sendout()/pfsync_sendout_mbuf(), not via
 * the ifnet send queue, so anything that lands here is simply counted
 * as dropped and freed.
 */
void
pfsyncstart(struct ifnet *ifp)
{
	struct mbuf *m;

	while ((m = pfsync_if_dequeue(ifp)) != NULL) {
#ifndef __FreeBSD__
		/* FreeBSD accounts the drop inside pfsync_if_dequeue() */
		IF_DROP(&ifp->if_snd);
#endif
		m_freem(m);
	}
}
626
/*
 * Allocate scrub state for a peer being imported, but only when the
 * wire message says scrubbing is active and the destination peer does
 * not already have a scrub record.  Returns 0 on success or ENOMEM.
 */
int
pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
    struct pf_state_peer *d)
{
	if (s->scrub.scrub_flag && d->scrub == NULL) {
#ifdef __FreeBSD__
		d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
#else
		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
#endif
		if (d->scrub == NULL)
			return (ENOMEM);
	}

	return (0);
}
643
#ifndef __FreeBSD__
/*
 * Serialize a local pf state into wire format (network byte order).
 * Times are converted to relative values: creation becomes "seconds
 * since created", expire becomes "seconds until expiry" (clamped at 0).
 * On FreeBSD this function lives elsewhere, hence the #ifndef.
 */
void
pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
{
	bzero(sp, sizeof(struct pfsync_state));

	/* copy from state key */
	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
	sp->proto = st->key[PF_SK_WIRE]->proto;
	sp->af = st->key[PF_SK_WIRE]->af;

	/* copy from state */
	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
	sp->creation = htonl(time_second - st->creation);
	/* sp->expire holds a host-order absolute time briefly, then the
	 * relative wire value; already-expired states go out as 0. */
	sp->expire = pf_state_expires(st);
	if (sp->expire <= time_second)
		sp->expire = htonl(0);
	else
		sp->expire = htonl(sp->expire - time_second);

	sp->direction = st->direction;
	sp->log = st->log;
	sp->timeout = st->timeout;
	sp->state_flags = st->state_flags;
	if (st->src_node)
		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
	if (st->nat_src_node)
		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;

	bcopy(&st->id, &sp->id, sizeof(sp->id));
	sp->creatorid = st->creatorid;
	pf_state_peer_hton(&st->src, &sp->src);
	pf_state_peer_hton(&st->dst, &sp->dst);

	/* rule numbers; -1 marks "no rule" so the peer uses its default */
	if (st->rule.ptr == NULL)
		sp->rule = htonl(-1);
	else
		sp->rule = htonl(st->rule.ptr->nr);
	if (st->anchor.ptr == NULL)
		sp->anchor = htonl(-1);
	else
		sp->anchor = htonl(st->anchor.ptr->nr);
	if (st->nat_rule.ptr == NULL)
		sp->nat_rule = htonl(-1);
	else
		sp->nat_rule = htonl(st->nat_rule.ptr->nr);

	pf_state_counter_hton(st->packets[0], sp->packets[0]);
	pf_state_counter_hton(st->packets[1], sp->packets[1]);
	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);

}
#endif
706
707int
708pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
709{
710	struct pf_state	*st = NULL;
711	struct pf_state_key *skw = NULL, *sks = NULL;
712	struct pf_rule *r = NULL;
713	struct pfi_kif	*kif;
714	int pool_flags;
715	int error;
716
717#ifdef __FreeBSD__
718	if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
719#else
720	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
721#endif
722		printf("pfsync_state_import: invalid creator id:"
723		    " %08x\n", ntohl(sp->creatorid));
724		return (EINVAL);
725	}
726
727	if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
728#ifdef __FreeBSD__
729		if (V_pf_status.debug >= PF_DEBUG_MISC)
730#else
731		if (pf_status.debug >= PF_DEBUG_MISC)
732#endif
733			printf("pfsync_state_import: "
734			    "unknown interface: %s\n", sp->ifname);
735		if (flags & PFSYNC_SI_IOCTL)
736			return (EINVAL);
737		return (0);	/* skip this state */
738	}
739
740	/*
741	 * If the ruleset checksums match or the state is coming from the ioctl,
742	 * it's safe to associate the state with the rule of that number.
743	 */
744	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
745	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
746	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
747		r = pf_main_ruleset.rules[
748		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
749	else
750#ifdef __FreeBSD__
751		r = &V_pf_default_rule;
752#else
753		r = &pf_default_rule;
754#endif
755
756	if ((r->max_states && r->states_cur >= r->max_states))
757		goto cleanup;
758
759#ifdef __FreeBSD__
760	if (flags & PFSYNC_SI_IOCTL)
761		pool_flags = PR_WAITOK | PR_ZERO;
762	else
763		pool_flags = PR_ZERO;
764
765	if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)
766		goto cleanup;
767#else
768	if (flags & PFSYNC_SI_IOCTL)
769		pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
770	else
771		pool_flags = PR_LIMITFAIL | PR_ZERO;
772
773	if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
774		goto cleanup;
775#endif
776
777	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
778		goto cleanup;
779
780	if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
781	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
782	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
783	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
784	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
785	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
786		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
787			goto cleanup;
788	} else
789		sks = skw;
790
791	/* allocate memory for scrub info */
792	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
793	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
794		goto cleanup;
795
796	/* copy to state key(s) */
797	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
798	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
799	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
800	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
801	skw->proto = sp->proto;
802	skw->af = sp->af;
803	if (sks != skw) {
804		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
805		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
806		sks->port[0] = sp->key[PF_SK_STACK].port[0];
807		sks->port[1] = sp->key[PF_SK_STACK].port[1];
808		sks->proto = sp->proto;
809		sks->af = sp->af;
810	}
811
812	/* copy to state */
813	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
814	st->creation = time_second - ntohl(sp->creation);
815	st->expire = time_second;
816	if (sp->expire) {
817		/* XXX No adaptive scaling. */
818		st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
819	}
820
821	st->expire = ntohl(sp->expire) + time_second;
822	st->direction = sp->direction;
823	st->log = sp->log;
824	st->timeout = sp->timeout;
825	st->state_flags = sp->state_flags;
826
827	bcopy(sp->id, &st->id, sizeof(st->id));
828	st->creatorid = sp->creatorid;
829	pf_state_peer_ntoh(&sp->src, &st->src);
830	pf_state_peer_ntoh(&sp->dst, &st->dst);
831
832	st->rule.ptr = r;
833	st->nat_rule.ptr = NULL;
834	st->anchor.ptr = NULL;
835	st->rt_kif = NULL;
836
837	st->pfsync_time = time_second;
838	st->sync_state = PFSYNC_S_NONE;
839
840	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
841	r->states_cur++;
842	r->states_tot++;
843
844	if (!ISSET(flags, PFSYNC_SI_IOCTL))
845		SET(st->state_flags, PFSTATE_NOSYNC);
846
847	if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
848		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
849		r->states_cur--;
850		goto cleanup_state;
851	}
852
853	if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
854		CLR(st->state_flags, PFSTATE_NOSYNC);
855		if (ISSET(st->state_flags, PFSTATE_ACK)) {
856			pfsync_q_ins(st, PFSYNC_S_IACK);
857			schednetisr(NETISR_PFSYNC);
858		}
859	}
860	CLR(st->state_flags, PFSTATE_ACK);
861
862	return (0);
863
864cleanup:
865	error = ENOMEM;
866	if (skw == sks)
867		sks = NULL;
868#ifdef __FreeBSD__
869	if (skw != NULL)
870		pool_put(&V_pf_state_key_pl, skw);
871	if (sks != NULL)
872		pool_put(&V_pf_state_key_pl, sks);
873#else
874	if (skw != NULL)
875		pool_put(&pf_state_key_pl, skw);
876	if (sks != NULL)
877		pool_put(&pf_state_key_pl, sks);
878#endif
879
880cleanup_state:	/* pf_state_insert frees the state keys */
881	if (st) {
882#ifdef __FreeBSD__
883		if (st->dst.scrub)
884			pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
885		if (st->src.scrub)
886			pool_put(&V_pf_state_scrub_pl, st->src.scrub);
887		pool_put(&V_pf_state_pl, st);
888#else
889		if (st->dst.scrub)
890			pool_put(&pf_state_scrub_pl, st->dst.scrub);
891		if (st->src.scrub)
892			pool_put(&pf_state_scrub_pl, st->src.scrub);
893		pool_put(&pf_state_pl, st);
894#endif
895	}
896	return (error);
897}
898
/*
 * Input path for pfsync packets (IP protocol 240).  Validates the
 * receiving interface, TTL and header, then walks the subheaders and
 * dispatches each to its pfsync_in_*() action handler.  The mbuf is
 * freed here unless a handler returned -1, in which case the handler
 * has already disposed of it.
 */
void
#ifdef __FreeBSD__
pfsync_input(struct mbuf *m, __unused int off)
#else
pfsync_input(struct mbuf *m, ...)
#endif
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_pkt pkt;
	struct ip *ip = mtod(m, struct ip *);
	struct pfsync_header *ph;
	struct pfsync_subheader subh;

	int offset;
	int rv;

	V_pfsyncstats.pfsyncs_ipackets++;

	/* verify that we have a sync interface configured */
#ifdef __FreeBSD__
	if (!sc || !sc->sc_sync_if || !V_pf_status.running)
#else
	if (!sc || !sc->sc_sync_if || !pf_status.running)
#endif
		goto done;

	/* verify that the packet came in on the right interface */
	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
		V_pfsyncstats.pfsyncs_badif++;
		goto done;
	}

#ifdef __FreeBSD__
	sc->sc_ifp->if_ipackets++;
	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
#else
	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;
#endif
	/* verify that the IP TTL is 255. */
	if (ip->ip_ttl != PFSYNC_DFLTTL) {
		V_pfsyncstats.pfsyncs_badttl++;
		goto done;
	}

	offset = ip->ip_hl << 2;
	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
		V_pfsyncstats.pfsyncs_hdrops++;
		goto done;
	}

	/* make the pfsync header contiguous before casting to it */
	if (offset + sizeof(*ph) > m->m_len) {
		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
			/* m_pullup freed the mbuf on failure */
			V_pfsyncstats.pfsyncs_hdrops++;
			return;
		}
		ip = mtod(m, struct ip *);
	}
	ph = (struct pfsync_header *)((char *)ip + offset);

	/* verify the version */
	if (ph->version != PFSYNC_VERSION) {
		V_pfsyncstats.pfsyncs_badver++;
		goto done;
	}

#if 0
	if (pfsync_input_hmac(m, offset) != 0) {
		/* XXX stats */
		goto done;
	}
#endif

	/* Cheaper to grab this now than having to mess with mbufs later */
	pkt.ip = ip;
	pkt.src = ip->ip_src;
	pkt.flags = 0;

	/* a matching ruleset checksum lets handlers trust rule numbers */
#ifdef __FreeBSD__
	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
#else
	if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
#endif
		pkt.flags |= PFSYNC_SI_CKSUM;

	offset += sizeof(*ph);
	/*
	 * Walk the subheaders.  The loop ends either via the badact
	 * check or when a handler returns -1 — presumably every packet
	 * ends with PFSYNC_ACT_EOF whose handler stops the parse; the
	 * loop itself has no length check (TODO confirm).
	 */
	for (;;) {
		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
		offset += sizeof(subh);

		if (subh.action >= PFSYNC_ACT_MAX) {
			V_pfsyncstats.pfsyncs_badact++;
			goto done;
		}

		rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
		    ntohs(subh.count));
		if (rv == -1)
			return;

		offset += rv;
	}

done:
	m_freem(m);
}
1009
/*
 * Handle PFSYNC_ACT_CLR: the peer cleared its state table.  For each
 * clear message, unlink every local state with the matching creator id
 * — all states when no interface name is given, otherwise only states
 * keyed to that interface.  PFSTATE_NOSYNC is set first so the
 * deletions are not echoed back to the peer.  Returns bytes consumed,
 * or -1 if m_pulldown failed (mbuf already freed).
 */
int
pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_clr *clr;
	struct mbuf *mp;
	int len = sizeof(*clr) * count;
	int i, offp;

	struct pf_state *st, *nexts;
	struct pf_state_key *sk, *nextsk;
	struct pf_state_item *si;
	u_int32_t creatorid;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	clr = (struct pfsync_clr *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		creatorid = clr[i].creatorid;

		if (clr[i].ifname[0] == '\0') {
			/* no interface given: clear by creator id alone */
#ifdef __FreeBSD__
			for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
#else
			for (st = RB_MIN(pf_state_tree_id, &tree_id);
			    st; st = nexts) {
				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
#endif
				if (st->creatorid == creatorid) {
					SET(st->state_flags, PFSTATE_NOSYNC);
					pf_unlink_state(st);
				}
			}
		} else {
			if (pfi_kif_get(clr[i].ifname) == NULL)
				continue;

			/* XXX correct? */
#ifdef __FreeBSD__
			for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
#else
			for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
#endif
			    sk; sk = nextsk) {
				/* fetch next before unlink may free sk */
				nextsk = RB_NEXT(pf_state_tree,
#ifdef __FreeBSD__
				    &V_pf_statetbl, sk);
#else
				    &pf_statetbl, sk);
#endif
				TAILQ_FOREACH(si, &sk->states, entry) {
					if (si->s->creatorid == creatorid) {
						SET(si->s->state_flags,
						    PFSTATE_NOSYNC);
						pf_unlink_state(si->s);
					}
				}
			}
		}
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1087
/*
 * Handle PFSYNC_ACT_INS: import full states inserted by the peer.
 * Messages with out-of-range fields are counted and skipped; an
 * ENOMEM from pfsync_state_import aborts the remaining messages in
 * this batch but still reports the full length as consumed.  Returns
 * bytes consumed, or -1 if m_pulldown failed (mbuf already freed).
 */
int
pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	int len = sizeof(*sp) * count;
	int i, offp;

	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST ||
		    sp->direction > PF_OUT ||
		    (sp->af != AF_INET && sp->af != AF_INET6)) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC5_ACT_INS: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
			/* drop out, but process the rest of the actions */
			break;
		}
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1142
/*
 * Handle PFSYNC_ACT_INS_ACK: the peer acknowledged states we inserted.
 * For each ack, look the state up by id/creatorid and release any
 * packet we were deferring on it (send it, via pfsync_deferred(st, 0)).
 * Returns bytes consumed, or -1 if m_pulldown failed (mbuf freed).
 */
int
pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_ins_ack *ia, *iaa;
	struct pf_state_cmp id_key;
	struct pf_state *st;

	struct mbuf *mp;
	int len = count * sizeof(*ia);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		ia = &iaa[i];

		bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ia->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL)
			continue;

		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 0);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
	/*
	 * XXX this is not yet implemented, but we know the size of the
	 * message so we can skip it.
	 */

	return (count * sizeof(struct pfsync_ins_ack));
}
1190
1191int
1192pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
1193    struct pfsync_state_peer *dst)
1194{
1195	int sfail = 0;
1196
1197	/*
1198	 * The state should never go backwards except
1199	 * for syn-proxy states.  Neither should the
1200	 * sequence window slide backwards.
1201	 */
1202	if (st->src.state > src->state &&
1203	    (st->src.state < PF_TCPS_PROXY_SRC ||
1204	    src->state >= PF_TCPS_PROXY_SRC))
1205		sfail = 1;
1206	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
1207		sfail = 3;
1208	else if (st->dst.state > dst->state) {
1209		/* There might still be useful
1210		 * information about the src state here,
1211		 * so import that part of the update,
1212		 * then "fail" so we send the updated
1213		 * state back to the peer who is missing
1214		 * our what we know. */
1215		pf_state_peer_ntoh(src, &st->src);
1216		/* XXX do anything with timeouts? */
1217		sfail = 7;
1218	} else if (st->dst.state >= TCPS_SYN_SENT &&
1219	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
1220		sfail = 4;
1221
1222	return (sfail);
1223}
1224
/*
 * Handle a PFSYNC_ACT_UPD message: full state updates from a peer.
 * Unknown states are imported as new; known states are merged unless the
 * update is stale, in which case we resend our own (newer) state instead.
 * Returns the number of bytes consumed, or -1 on a truncated message.
 */
int
pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state_key *sk;
	struct pf_state *st;
	int sfail;

	struct mbuf *mp;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		/* check for invalid values */
		if (sp->timeout >= PFTM_MAX ||
		    sp->src.state > PF_TCPS_PROXY_DST ||
		    sp->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: PFSYNC_ACT_UPD: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* insert the update */
			if (pfsync_state_import(sp, 0))
				V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}

		/* peer clearly knows about this state; drop any deferral */
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE];	/* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go
			 * forwards
			 */
			if (st->src.state > sp->src.state)
				sfail = 5;
			else if (st->dst.state > sp->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: %s stale update (%d)"
				    " id: %016llx creatorid: %08x\n",
				    (sfail < 7 ?  "ignoring" : "partial"),
				    sfail, betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			/* push our version of the state back to the peer */
			pfsync_update_state(st);
			schednetisr(NETISR_PFSYNC);
			continue;
		}
		/* accept the update: merge peers, refresh expiry/timeout */
		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
		pf_state_peer_ntoh(&sp->src, &st->src);
		pf_state_peer_ntoh(&sp->dst, &st->dst);
		st->expire = ntohl(sp->expire) + time_second;
		st->timeout = sp->timeout;
		st->pfsync_time = time_second;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1330
/*
 * Handle a PFSYNC_ACT_UPD_C message: compressed state updates (id,
 * creatorid and peer data only).  Unlike pfsync_in_upd(), an unknown state
 * cannot be imported from the compressed record, so we request a full
 * update from the peer instead.  Returns bytes consumed, or -1 on a
 * truncated message.
 */
int
pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_c *ua, *up;
	struct pf_state_key *sk;
	struct pf_state_cmp id_key;
	struct pf_state *st;

	int len = count * sizeof(*up);
	int sfail;

	struct mbuf *mp;
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ua = (struct pfsync_upd_c *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		up = &ua[i];

		/* check for invalid values */
		if (up->timeout >= PFTM_MAX ||
		    up->src.state > PF_TCPS_PROXY_DST ||
		    up->dst.state > PF_TCPS_PROXY_DST) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync_input: "
				    "PFSYNC_ACT_UPD_C: "
				    "invalid value\n");
			}
			V_pfsyncstats.pfsyncs_badval++;
			continue;
		}

		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = up->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			/* We don't have this state. Ask for it. */
			pfsync_request_update(id_key.creatorid, id_key.id);
			continue;
		}

		/* peer clearly knows about this state; drop any deferral */
		if (ISSET(st->state_flags, PFSTATE_ACK))
			pfsync_deferred(st, 1);

		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
		sfail = 0;
		if (sk->proto == IPPROTO_TCP)
			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
		else {
			/*
			 * Non-TCP protocol state machine always go forwards
			 */
			if (st->src.state > up->src.state)
				sfail = 5;
			else if (st->dst.state > up->dst.state)
				sfail = 6;
		}

		if (sfail) {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
#else
			if (pf_status.debug >= PF_DEBUG_MISC) {
#endif
				printf("pfsync: ignoring stale update "
				    "(%d) id: %016llx "
				    "creatorid: %08x\n", sfail,
				    betoh64(st->id),
				    ntohl(st->creatorid));
			}
			V_pfsyncstats.pfsyncs_stale++;

			/* push our version of the state back to the peer */
			pfsync_update_state(st);
			schednetisr(NETISR_PFSYNC);
			continue;
		}
		/* accept the update: merge peers, refresh expiry/timeout */
		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
		pf_state_peer_ntoh(&up->src, &st->src);
		pf_state_peer_ntoh(&up->dst, &st->dst);
		st->expire = ntohl(up->expire) + time_second;
		st->timeout = up->timeout;
		st->pfsync_time = time_second;
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1436
/*
 * Handle a PFSYNC_ACT_UPD_REQ message: a peer asks us to (re)send states.
 * The all-zero key (id == 0 && creatorid == 0) is the wildcard request and
 * triggers a full bulk update; otherwise the named state is queued for an
 * individual update unless it is flagged no-sync.  Returns bytes consumed,
 * or -1 on a truncated message.
 */
int
pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct pfsync_upd_req *ur, *ura;
	struct mbuf *mp;
	int len = count * sizeof(*ur);
	int i, offp;

	struct pf_state_cmp id_key;
	struct pf_state *st;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	ura = (struct pfsync_upd_req *)(mp->m_data + offp);

	for (i = 0; i < count; i++) {
		ur = &ura[i];

		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = ur->creatorid;

		if (id_key.id == 0 && id_key.creatorid == 0)
			pfsync_bulk_start();
		else {
			st = pf_find_state_byid(&id_key);
			if (st == NULL) {
				V_pfsyncstats.pfsyncs_badstate++;
				continue;
			}
			if (ISSET(st->state_flags, PFSTATE_NOSYNC))
				continue;

			pfsync_update_state_req(st);
		}
	}

	return (len);
}
1478
/*
 * Handle a PFSYNC_ACT_DEL message: full state deletions from a peer.
 * Each named state is unlinked locally; PFSTATE_NOSYNC is set first so
 * the unlink does not get announced back to the peer.  Returns bytes
 * consumed, or -1 on a truncated message.
 */
int
pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	struct mbuf *mp;
	struct pfsync_state *sa, *sp;
	struct pf_state_cmp id_key;
	struct pf_state *st;
	int len = count * sizeof(*sp);
	int offp, i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	sa = (struct pfsync_state *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++) {
		sp = &sa[i];

		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
		id_key.creatorid = sp->creatorid;

		st = pf_find_state_byid(&id_key);
		if (st == NULL) {
			V_pfsyncstats.pfsyncs_badstate++;
			continue;
		}
		/* suppress re-announcing this deletion to the peer */
		SET(st->state_flags, PFSTATE_NOSYNC);
		pf_unlink_state(st);
	}
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

	return (len);
}
1522
1523int
1524pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1525{
1526	struct mbuf *mp;
1527	struct pfsync_del_c *sa, *sp;
1528	struct pf_state_cmp id_key;
1529	struct pf_state *st;
1530	int len = count * sizeof(*sp);
1531	int offp, i;
1532	int s;
1533
1534	mp = m_pulldown(m, offset, len, &offp);
1535	if (mp == NULL) {
1536		V_pfsyncstats.pfsyncs_badlen++;
1537		return (-1);
1538	}
1539	sa = (struct pfsync_del_c *)(mp->m_data + offp);
1540
1541	s = splsoftnet();
1542#ifdef __FreeBSD__
1543	PF_LOCK();
1544#endif
1545	for (i = 0; i < count; i++) {
1546		sp = &sa[i];
1547
1548		bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
1549		id_key.creatorid = sp->creatorid;
1550
1551		st = pf_find_state_byid(&id_key);
1552		if (st == NULL) {
1553			V_pfsyncstats.pfsyncs_badstate++;
1554			continue;
1555		}
1556
1557		SET(st->state_flags, PFSTATE_NOSYNC);
1558		pf_unlink_state(st);
1559	}
1560#ifdef __FreeBSD__
1561	PF_LOCK();
1562#endif
1563	splx(s);
1564
1565	return (len);
1566}
1567
/*
 * Handle a PFSYNC_ACT_BUS message: bulk update status from a peer.
 * BUS_START re-arms the bulk-failure timeout; BUS_END, if its timestamp is
 * consistent with when we sent the request, marks the bulk update complete
 * and declares this host in sync.  Ignored entirely unless we have a bulk
 * request outstanding.  Returns bytes consumed, or -1 on a truncated
 * message.
 */
int
pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_bus *bus;
	struct mbuf *mp;
	int len = count * sizeof(*bus);
	int offp;

	/* If we're not waiting for a bulk update, who cares. */
	if (sc->sc_ureq_sent == 0)
		return (len);

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	bus = (struct pfsync_bus *)(mp->m_data + offp);

	switch (bus->status) {
	case PFSYNC_BUS_START:
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz, pfsync_bulk_fail,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5); /* XXX magic */
#endif
/* dead code preserved from an older timeout computation */
#ifdef XXX
		    pf_pool_limits[PF_LIMIT_STATES].limit /
		    (PFSYNC_BULKPACKETS * sc->sc_maxcount));
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: received bulk update start\n");
		break;

	case PFSYNC_BUS_END:
		/* only accept an end stamped after our request went out */
		if (time_uptime - ntohl(bus->endtime) >=
		    sc->sc_ureq_sent) {
			/* that's it, we're happy */
			sc->sc_ureq_sent = 0;
			sc->sc_bulk_tries = 0;
			timeout_del(&sc->sc_bulkfail_tmo);
#if NCARP > 0
#ifdef notyet
#ifdef __FreeBSD__
			if (!sc->pfsync_sync_ok)
#else
			if (!pfsync_sync_ok)
#endif
				carp_group_demote_adj(&sc->sc_if, -1);
#endif
#endif
#ifdef __FreeBSD__
			sc->pfsync_sync_ok = 1;
#else
			pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received valid "
				    "bulk update end\n");
		} else {
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: received invalid "
				    "bulk update end: bad timestamp\n");
		}
		break;
	}

	return (len);
}
1655
/*
 * Handle a PFSYNC_ACT_TDB message: IPsec TDB replay-counter updates.
 * The body only exists when compiled with IPSEC; otherwise the records
 * are skipped.  Returns bytes consumed, or (with IPSEC) -1 on a truncated
 * message.
 */
int
pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
	int len = count * sizeof(struct pfsync_tdb);

#if defined(IPSEC)
	struct pfsync_tdb *tp;
	struct mbuf *mp;
	int offp;
	int i;
	int s;

	mp = m_pulldown(m, offset, len, &offp);
	if (mp == NULL) {
		V_pfsyncstats.pfsyncs_badlen++;
		return (-1);
	}
	tp = (struct pfsync_tdb *)(mp->m_data + offp);

	s = splsoftnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	for (i = 0; i < count; i++)
		pfsync_update_net_tdb(&tp[i]);
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
#endif

	return (len);
}
1689
1690#if defined(IPSEC)
/* Update an in-kernel tdb. Silently fail if no tdb is found. */
/*
 * Apply a peer's pfsync_tdb record to the matching local TDB, identified
 * by (spi, dst, sproto).  Replay counter and byte counter may only move
 * forward; a record that would move either backwards, or that carries an
 * invalid SPI or address family, is rejected and counted as a bad state.
 */
void
pfsync_update_net_tdb(struct pfsync_tdb *pt)
{
	struct tdb		*tdb;
	int			 s;

	/* check for invalid values */
	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
	    (pt->dst.sa.sa_family != AF_INET &&
	     pt->dst.sa.sa_family != AF_INET6))
		goto bad;

	s = spltdb();
	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
	if (tdb) {
		/* convert wire byte order in place before comparing */
		pt->rpl = ntohl(pt->rpl);
		pt->cur_bytes = betoh64(pt->cur_bytes);

		/* Neither replay nor byte counter should ever decrease. */
		if (pt->rpl < tdb->tdb_rpl ||
		    pt->cur_bytes < tdb->tdb_cur_bytes) {
			splx(s);
			goto bad;
		}

		tdb->tdb_rpl = pt->rpl;
		tdb->tdb_cur_bytes = pt->cur_bytes;
	}
	splx(s);
	return;

bad:
#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
		    "invalid value\n");
	V_pfsyncstats.pfsyncs_badstate++;
	return;
}
1734#endif
1735
1736
1737int
1738pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1739{
1740	/* check if we are at the right place in the packet */
1741	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
1742		V_pfsyncstats.pfsyncs_badact++;
1743
1744	/* we're done. free and let the caller return */
1745	m_freem(m);
1746	return (-1);
1747}
1748
1749int
1750pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1751{
1752	V_pfsyncstats.pfsyncs_badact++;
1753
1754	m_freem(m);
1755	return (-1);
1756}
1757
/*
 * if_output handler for the pfsync pseudo-interface.  pfsync never sends
 * packets through the generic output path, so anything handed to us is
 * simply dropped.
 */
int
pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
#ifdef __FreeBSD__
	struct route *rt)
#else
	struct rtentry *rt)
#endif
{
	m_freem(m);
	return (0);
}
1769
1770/* ARGSUSED */
/*
 * ioctl handler for the pfsync pseudo-interface.
 *
 * Supported commands:
 *   SIOCSIFFLAGS  - mirror IFF_UP into the running flag.
 *   SIOCSIFMTU    - change MTU (bounded by PFSYNC_MINPKT and MCLBYTES);
 *                   flushes the pending packet first if the MTU shrinks.
 *   SIOCGETPFSYNC - report syncdev/syncpeer/maxupdates to userland.
 *   SIOCSETPFSYNC - (privileged) reconfigure peer address, maxupdates and
 *                   sync interface; joins/leaves the pfsync multicast
 *                   group, rebuilds the IP header template, and kicks off
 *                   a bulk update request on the new interface.
 *
 * On FreeBSD the pf lock is dropped and re-taken around calls that may
 * sleep or recurse (ifunit, in_addmulti/in_delmulti), so the lock dance
 * below is deliberate.
 */
int
pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#ifndef __FreeBSD__
	struct proc *p = curproc;
#endif
	struct pfsync_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ip_moptions *imo = &sc->sc_imo;
	struct pfsyncreq pfsyncr;
	struct ifnet    *sifp;
	struct ip *ip;
	int s, error;

	switch (cmd) {
#if 0
	case SIOCSIFADDR:
	case SIOCAIFADDR:
	case SIOCSIFDSTADDR:
#endif
	case SIOCSIFFLAGS:
#ifdef __FreeBSD__
		if (ifp->if_flags & IFF_UP)
			ifp->if_drv_flags |= IFF_DRV_RUNNING;
		else
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
#else
		if (ifp->if_flags & IFF_UP)
			ifp->if_flags |= IFF_RUNNING;
		else
			ifp->if_flags &= ~IFF_RUNNING;
#endif
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu <= PFSYNC_MINPKT)
			return (EINVAL);
		if (ifr->ifr_mtu > MCLBYTES) /* XXX could be bigger */
			ifr->ifr_mtu = MCLBYTES;
		if (ifr->ifr_mtu < ifp->if_mtu) {
			/* shrinking: flush the pending packet first */
			s = splnet();
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			pfsync_sendout();
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			splx(s);
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCGETPFSYNC:
		bzero(&pfsyncr, sizeof(pfsyncr));
		if (sc->sc_sync_if) {
			strlcpy(pfsyncr.pfsyncr_syncdev,
			    sc->sc_sync_if->if_xname, IFNAMSIZ);
		}
		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));

	case SIOCSETPFSYNC:
#ifdef __FreeBSD__
		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
#else
		if ((error = suser(p, p->p_acflag)) != 0)
#endif
			return (error);
		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
			return (error);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		/* empty peer means the pfsync multicast group */
		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
#ifdef __FreeBSD__
			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
#endif
		else
			sc->sc_sync_peer.s_addr =
			    pfsyncr.pfsyncr_syncpeer.s_addr;

		if (pfsyncr.pfsyncr_maxupdates > 255)
#ifdef __FreeBSD__
		{
			PF_UNLOCK();
#endif
			return (EINVAL);
#ifdef __FreeBSD__
		}
#endif
		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;

		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
			/* no sync interface: detach and leave the group */
			sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if (imo->imo_num_memberships > 0) {
				in_delmulti(imo->imo_membership[
				    --imo->imo_num_memberships]);
				imo->imo_multicast_ifp = NULL;
			}
			break;
		}

#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
			return (EINVAL);

#ifdef __FreeBSD__
		PF_LOCK();
#endif
		s = splnet();
		/* flush pending output if the new device can't carry it */
#ifdef __FreeBSD__
		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
#else
		if (sifp->if_mtu < sc->sc_if.if_mtu ||
#endif
		    (sc->sc_sync_if != NULL &&
		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
			pfsync_sendout();
		sc->sc_sync_if = sifp;

		if (imo->imo_num_memberships > 0) {
#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			imo->imo_multicast_ifp = NULL;
		}

		/* join the pfsync multicast group on the new device */
		if (sc->sc_sync_if &&
#ifdef __FreeBSD__
		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
#else
		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
#endif
			struct in_addr addr;

			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
				sc->sc_sync_if = NULL;
#ifdef __FreeBSD__
				PF_UNLOCK();
#endif
				splx(s);
				return (EADDRNOTAVAIL);
			}

#ifdef __FreeBSD__
			addr.s_addr = htonl(INADDR_PFSYNC_GROUP);
#else
			addr.s_addr = INADDR_PFSYNC_GROUP;
#endif

#ifdef __FreeBSD__
			PF_UNLOCK();
#endif
			if ((imo->imo_membership[0] =
			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
				sc->sc_sync_if = NULL;
				splx(s);
				return (ENOBUFS);
			}
#ifdef __FreeBSD__
			PF_LOCK();
#endif
			imo->imo_num_memberships++;
			imo->imo_multicast_ifp = sc->sc_sync_if;
			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
			imo->imo_multicast_loop = 0;
		}

		/* rebuild the header template prepended to every packet */
		ip = &sc->sc_template;
		bzero(ip, sizeof(*ip));
		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(sc->sc_template) >> 2;
		ip->ip_tos = IPTOS_LOWDELAY;
		/* len and id are set later */
		ip->ip_off = htons(IP_DF);
		ip->ip_ttl = PFSYNC_DFLTTL;
		ip->ip_p = IPPROTO_PFSYNC;
		ip->ip_src.s_addr = INADDR_ANY;
		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;

		if (sc->sc_sync_if) {
			/* Request a full state table update. */
			sc->sc_ureq_sent = time_uptime;
#if NCARP > 0
#ifdef notyet
#ifdef __FreeBSD__
			if (sc->pfsync_sync_ok)
#else
			if (pfsync_sync_ok)
#endif
				carp_group_demote_adj(&sc->sc_if, 1);
#endif
#endif
#ifdef __FreeBSD__
			sc->pfsync_sync_ok = 0;
#else
			pfsync_sync_ok = 0;
#endif
#ifdef __FreeBSD__
			if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
			if (pf_status.debug >= PF_DEBUG_MISC)
#endif
				printf("pfsync: requesting bulk update\n");
/*
 * NOTE(review): the callout_reset below is indented as if it belonged to
 * the unbraced debug "if" above, but it does NOT - it always executes.
 * That is the intended behavior (arm the bulk-failure timer); only the
 * indentation is misleading.
 */
#ifdef __FreeBSD__
				callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
				    pfsync_bulk_fail, V_pfsyncif);
#else
			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
			pfsync_request_update(0, 0);
		}
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		splx(s);

		break;

	default:
		return (ENOTTY);
	}

	return (0);
}
2009
2010int
2011pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
2012{
2013	struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);
2014
2015	pfsync_state_export(sp, st);
2016
2017	return (sizeof(*sp));
2018}
2019
2020int
2021pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
2022{
2023	struct pfsync_ins_ack *iack =
2024	    (struct pfsync_ins_ack *)(m->m_data + offset);
2025
2026	iack->id = st->id;
2027	iack->creatorid = st->creatorid;
2028
2029	return (sizeof(*iack));
2030}
2031
2032int
2033pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
2034{
2035	struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);
2036
2037	up->id = st->id;
2038	pf_state_peer_hton(&st->src, &up->src);
2039	pf_state_peer_hton(&st->dst, &up->dst);
2040	up->creatorid = st->creatorid;
2041
2042	up->expire = pf_state_expires(st);
2043	if (up->expire <= time_second)
2044		up->expire = htonl(0);
2045	else
2046		up->expire = htonl(up->expire - time_second);
2047	up->timeout = st->timeout;
2048
2049	bzero(up->_pad, sizeof(up->_pad)); /* XXX */
2050
2051	return (sizeof(*up));
2052}
2053
2054int
2055pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
2056{
2057	struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);
2058
2059	dp->id = st->id;
2060	dp->creatorid = st->creatorid;
2061
2062	SET(st->state_flags, PFSTATE_NOSYNC);
2063
2064	return (sizeof(*dp));
2065}
2066
/*
 * Discard everything queued for the next pfsync packet without sending:
 * clear each per-action state queue (resetting sync_state on the states),
 * free all pending update requests, drop any custom "plus" region, and
 * reset the pending length back to the bare packet size.
 */
void
pfsync_drop(struct pfsync_softc *sc)
{
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
	int q;

	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
				("%s: st->sync_state == q",
					__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif
			st->sync_state = PFSYNC_S_NONE;
		}
		TAILQ_INIT(&sc->sc_qs[q]);
	}

	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
		pool_put(&sc->sc_pool, ur);
	}

	sc->sc_plus = NULL;

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
			CLR(t->tdb_flags, TDBF_PFSYNC);

		TAILQ_INIT(&sc->sc_tdb_q);
	}
#endif

	sc->sc_len = PFSYNC_MINPKT;
}
2114
/*
 * Build and transmit the pending pfsync packet.
 *
 * Allocates an mbuf sized for everything queued (sc_len), writes the IP
 * header from the template, the pfsync header, then one subheader-prefixed
 * section per non-empty action queue, the pending update requests, any
 * custom "plus" region, and a trailing EOF subheader.  The packet is
 * handed to bpf (if listening) and sent via ip_output() to the configured
 * peer/multicast group.  On any allocation failure the queued work is
 * dropped via pfsync_drop().
 *
 * Caller must hold the pf lock (FreeBSD) / be at IPL_NET (OpenBSD); on
 * FreeBSD the lock is released around ip_output() since it may sleep.
 */
void
pfsync_sendout(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
#if NBPFILTER > 0
#ifdef __FreeBSD__
	struct ifnet *ifp = sc->sc_ifp;
#else
	struct ifnet *ifp = &sc->sc_if;
#endif
#endif
	struct mbuf *m;
	struct ip *ip;
	struct pfsync_header *ph;
	struct pfsync_subheader *subh;
	struct pf_state *st;
	struct pfsync_upd_req_item *ur;
#ifdef notyet
	struct tdb *t;
#endif
#ifdef __FreeBSD__
	size_t pktlen;
#endif
	int offset;
	int q, count = 0;

#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#else
	splassert(IPL_NET);
#endif

	/* nothing queued beyond the bare headers: nothing to send */
	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
		return;

#if NBPFILTER > 0
	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
#else
	if (sc->sc_sync_if == NULL) {
#endif
		pfsync_drop(sc);
		return;
	}

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
#ifdef __FreeBSD__
		sc->sc_ifp->if_oerrors++;
#else
		sc->sc_if.if_oerrors++;
#endif
		V_pfsyncstats.pfsyncs_onomem++;
		pfsync_drop(sc);
		return;
	}

	/* attach an external cluster when the packet won't fit in MHLEN */
#ifdef __FreeBSD__
	pktlen = max_linkhdr + sc->sc_len;
	if (pktlen > MHLEN) {
		/* Find the right pool to allocate from. */
		/* XXX: This is ugly. */
		m_cljget(m, M_DONTWAIT, pktlen <= MSIZE ? MSIZE :
			pktlen <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
			pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
			pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
#else
	if (max_linkhdr + sc->sc_len > MHLEN) {
		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
#endif
		if (!ISSET(m->m_flags, M_EXT)) {
			m_free(m);
#ifdef __FreeBSD__
			sc->sc_ifp->if_oerrors++;
#else
			sc->sc_if.if_oerrors++;
#endif
			V_pfsyncstats.pfsyncs_onomem++;
			pfsync_drop(sc);
			return;
		}
	}
	m->m_data += max_linkhdr;
	m->m_len = m->m_pkthdr.len = sc->sc_len;

	/* build the ip header */
	ip = (struct ip *)m->m_data;
	bcopy(&sc->sc_template, ip, sizeof(*ip));
	offset = sizeof(*ip);

	ip->ip_len = htons(m->m_pkthdr.len);
	ip->ip_id = htons(ip_randomid());

	/* build the pfsync header */
	ph = (struct pfsync_header *)(m->m_data + offset);
	bzero(ph, sizeof(*ph));
	offset += sizeof(*ph);

	ph->version = PFSYNC_VERSION;
	ph->len = htons(sc->sc_len - sizeof(*ip));
#ifdef __FreeBSD__
	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#else
	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
#endif

	/* walk the queues */
	for (q = 0; q < PFSYNC_S_COUNT; q++) {
		if (TAILQ_EMPTY(&sc->sc_qs[q]))
			continue;

		/* subheader is filled in after the count is known */
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
			KASSERT(st->sync_state == q,
				("%s: st->sync_state == q",
					__FUNCTION__));
#else
			KASSERT(st->sync_state == q);
#endif
#endif

			offset += pfsync_qs[q].write(st, m, offset);
			st->sync_state = PFSYNC_S_NONE;
			count++;
		}
		TAILQ_INIT(&sc->sc_qs[q]);

		bzero(subh, sizeof(*subh));
		subh->action = pfsync_qs[q].action;
		subh->count = htons(count);
	}

	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);

			bcopy(&ur->ur_msg, m->m_data + offset,
			    sizeof(ur->ur_msg));
			offset += sizeof(ur->ur_msg);

			pool_put(&sc->sc_pool, ur);

			count++;
		}

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_UPD_REQ;
		subh->count = htons(count);
	}

	/* has someone built a custom region for us to add? */
	if (sc->sc_plus != NULL) {
		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
		offset += sc->sc_pluslen;

		sc->sc_plus = NULL;
	}

#ifdef notyet
	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
		subh = (struct pfsync_subheader *)(m->m_data + offset);
		offset += sizeof(*subh);

		count = 0;
		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
			offset += pfsync_out_tdb(t, m, offset);
			CLR(t->tdb_flags, TDBF_PFSYNC);

			count++;
		}
		TAILQ_INIT(&sc->sc_tdb_q);

		bzero(subh, sizeof(*subh));
		subh->action = PFSYNC_ACT_TDB;
		subh->count = htons(count);
	}
#endif

	subh = (struct pfsync_subheader *)(m->m_data + offset);
	offset += sizeof(*subh);

	bzero(subh, sizeof(*subh));
	subh->action = PFSYNC_ACT_EOF;
	subh->count = htons(1);

	/* XXX write checksum in EOF here */

	/* we're done, let's put it on the wire */
#if NBPFILTER > 0
	if (ifp->if_bpf) {
		/* feed bpf the packet without its IP header */
		m->m_data += sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
#ifdef __FreeBSD__
		BPF_MTAP(ifp, m);
#else
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		m->m_data -= sizeof(*ip);
		m->m_len = m->m_pkthdr.len = sc->sc_len;
	}

	if (sc->sc_sync_if == NULL) {
		sc->sc_len = PFSYNC_MINPKT;
		m_freem(m);
		return;
	}
#endif

#ifdef __FreeBSD__
	sc->sc_ifp->if_opackets++;
	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
#else
	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += m->m_pkthdr.len;
#endif

	/* ip_output may sleep; drop the pf lock across the call */
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
#ifdef __FreeBSD__
	{
		PF_LOCK();
#endif
		V_pfsyncstats.pfsyncs_opackets++;
#ifdef __FreeBSD__
	}
#endif
	else
#ifdef __FreeBSD__
	{
		PF_LOCK();
#endif
		V_pfsyncstats.pfsyncs_oerrors++;
#ifdef __FreeBSD__
	}
#endif

	/* start again */
	sc->sc_len = PFSYNC_MINPKT;
}
2370
/*
 * Called by pf when a new state is created: queue a PFSYNC_S_INS record
 * announcing it to our peers.  States created by no-sync rules, or states
 * for pfsync's own traffic, are flagged PFSTATE_NOSYNC and never
 * announced.  Arms the periodic send timeout when this is the first item
 * queued into an empty packet.
 */
void
pfsync_insert_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#else
	splassert(IPL_SOFTNET);
#endif

	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
		SET(st->state_flags, PFSTATE_NOSYNC);
		return;
	}

	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
		return;

#ifdef PFSYNC_DEBUG
#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif
#endif

	/* first item going into an empty packet: arm the send timer */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	pfsync_q_ins(st, PFSYNC_S_INS);

	/* deferred states want the announcement out immediately */
	if (ISSET(st->state_flags, PFSTATE_ACK))
		schednetisr(NETISR_PFSYNC);
	else
		st->sync_updates = 0;
}
2419
/* delay (in ticks, passed to timeout_add/callout_reset) before a deferred
 * packet is transmitted anyway; see pfsync_defer() below */
int defer = 10;
2421
/*
 * Hold back the packet that created state 'st' until a peer acknowledges
 * the state (or the 'defer' timeout fires).  The deferral list is capped
 * at 128 entries; when full, the oldest deferral is released first.
 * Returns 1 if the packet was taken (caller must not send it), 0 if the
 * deferral could not be allocated and the caller should transmit normally.
 */
int
pfsync_defer(struct pf_state *st, struct mbuf *m)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_deferral *pd;

#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#else
	splassert(IPL_SOFTNET);
#endif

	/* cap the list: push out the oldest deferral (transmit it) */
	if (sc->sc_deferred >= 128)
		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);

	pd = pool_get(&sc->sc_pool, M_NOWAIT);
	if (pd == NULL)
		return (0);
	sc->sc_deferred++;

	/* mark the packet so pf won't re-process it when it is released */
#ifdef __FreeBSD__
	m->m_flags |= M_SKIP_FIREWALL;
#else
	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
#endif
	SET(st->state_flags, PFSTATE_ACK);

	pd->pd_st = st;
	pd->pd_m = m;

	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
#ifdef __FreeBSD__
	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
		pd);
#else
	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
	timeout_add(&pd->pd_tmo, defer);
#endif

	return (1);
}
2468
/*
 * Resolve deferral 'pd': unlink it, then either free the held packet
 * ('drop' set) or transmit it now.  The pf lock is released around
 * ip_output() on FreeBSD and re-taken afterwards.
 */
void
pfsync_undefer(struct pfsync_deferral *pd, int drop)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#else
	splassert(IPL_SOFTNET);
#endif

	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
	sc->sc_deferred--;

	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
	timeout_del(&pd->pd_tmo); /* cancel the pending expiry */
	if (drop)
		m_freem(pd->pd_m);
	else {
		s = splnet();
#ifdef __FreeBSD__
		/* XXX: use pf_defered?! */
		PF_UNLOCK();
#endif
		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
		    (void *)NULL, (void *)NULL);
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		splx(s);
	}

	pool_put(&sc->sc_pool, pd);
}
2508
/*
 * Timeout handler: a deferral expired without a peer ack, so send the
 * held packet anyway.  'pd' is only declared under VIMAGE because
 * CURVNET_SET() expands to nothing otherwise, which would leave the
 * variable unused and trigger a compiler warning.
 */
void
pfsync_defer_tmo(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_deferral *pd = arg;
#endif
	int s;

	s = splsoftnet();
#ifdef __FreeBSD__
	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
	PF_LOCK();
#endif
	pfsync_undefer(arg, 0);
#ifdef __FreeBSD__
	PF_UNLOCK();
	CURVNET_RESTORE();
#endif
	splx(s);
}
2529
2530void
2531pfsync_deferred(struct pf_state *st, int drop)
2532{
2533#ifdef __FreeBSD__
2534	struct pfsync_softc *sc = V_pfsyncif;
2535#else
2536	struct pfsync_softc *sc = pfsyncif;
2537#endif
2538	struct pfsync_deferral *pd;
2539
2540	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
2541		 if (pd->pd_st == st) {
2542			pfsync_undefer(pd, drop);
2543			return;
2544		}
2545	}
2546
2547	panic("pfsync_send_deferred: unable to find deferred state");
2548}
2549
/* Debug counter: immediate transmissions triggered by state updates. */
u_int pfsync_upds = 0;
2551
/*
 * Schedule an "update" message for state 'st'.  Coalesces with any
 * message already queued for the state; pushes the packet out early
 * when the state has exceeded sc_maxupdates coalesced updates or was
 * updated twice within two seconds (an active flow worth syncing fast).
 */
void
pfsync_update_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int sync = 0;

#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	/* A pending deferral is resolved by this update: send the packet. */
	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 0);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	/* First item queued into an empty packet: arm the 1s push timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_INS:
		/* we're already handling it */

		st->sync_updates++;
		if (st->sync_updates >= sc->sc_maxupdates)
			sync = 1;
		break;

	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH: requeue as a compressed update */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD_C);
		st->sync_updates = 0;
		break;

	default:
		panic("pfsync_update_state: unexpected sync state %d",
		    st->sync_state);
	}

	if (sync || (time_second - st->pfsync_time) < 2) {
		pfsync_upds++;
		schednetisr(NETISR_PFSYNC);
	}
}
2615
/*
 * Queue an "update request" asking our peer to resend the full state
 * identified by (creatorid, id).  Flushes the pending packet first if
 * adding the request would exceed the interface MTU.
 */
void
pfsync_request_update(u_int32_t creatorid, u_int64_t id)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	struct pfsync_upd_req_item *item;
	size_t nlen = sizeof(struct pfsync_upd_req);
	int s;

	/*
	 * this code does nothing to prevent multiple update requests for the
	 * same state being generated.
	 */

	item = pool_get(&sc->sc_pool, PR_NOWAIT);
	if (item == NULL) {
		/* XXX stats */
		return;
	}

	item->ur_msg.id = id;
	item->ur_msg.creatorid = creatorid;

	/* First request in the packet also needs a subheader. */
	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
		pfsync_sendout();
		splx(s);

		/* Packet was flushed; we now start a fresh subheader. */
		nlen = sizeof(struct pfsync_subheader) +
		    sizeof(struct pfsync_upd_req);
	}

	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
	sc->sc_len += nlen;

	schednetisr(NETISR_PFSYNC);
}
2663
/*
 * Queue a full (uncompressed) update for 'st' in response to a peer's
 * update request, replacing any weaker message already queued for it.
 */
void
pfsync_update_state_req(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL)
		panic("pfsync_update_state_req: nonexistant instance");

	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	switch (st->sync_state) {
	case PFSYNC_S_UPD_C:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH: requeue as a full update */
	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_UPD);
		schednetisr(NETISR_PFSYNC);
		return;

	case PFSYNC_S_INS:
	case PFSYNC_S_UPD:
	case PFSYNC_S_DEL:
		/* we're already handling it */
		return;

	default:
		panic("pfsync_update_state_req: unexpected sync state %d",
		    st->sync_state);
	}
}
2702
/*
 * Schedule a "delete" message for state 'st', cancelling any weaker
 * message already queued for it.  A state that was queued as an insert
 * but never sent is simply dequeued: the peers never learned of it.
 */
void
pfsync_delete_state(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	/* The state is going away: drop any packet deferred on it. */
	if (ISSET(st->state_flags, PFSTATE_ACK))
		pfsync_deferred(st, 1);
	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
		if (st->sync_state != PFSYNC_S_NONE)
			pfsync_q_del(st);
		return;
	}

	/* First item queued into an empty packet: arm the 1s push timer. */
	if (sc->sc_len == PFSYNC_MINPKT)
#ifdef __FreeBSD__
		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
		    V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_tmo, 1);
#endif

	switch (st->sync_state) {
	case PFSYNC_S_INS:
		/* we never got to tell the world so just forget about it */
		pfsync_q_del(st);
		return;

	case PFSYNC_S_UPD_C:
	case PFSYNC_S_UPD:
	case PFSYNC_S_IACK:
		pfsync_q_del(st);
		/* FALLTHROUGH to putting it on the del list */

	case PFSYNC_S_NONE:
		pfsync_q_ins(st, PFSYNC_S_DEL);
		return;

	default:
		panic("pfsync_delete_state: unexpected sync state %d",
		    st->sync_state);
	}
}
2758
/*
 * Announce to our peers that all states with the given creatorid (and,
 * if 'ifname' is non-empty, on the named interface) have been cleared.
 */
void
pfsync_clear_states(u_int32_t creatorid, const char *ifname)
{
	/* On-wire message: subheader followed by a single clear record. */
	struct {
		struct pfsync_subheader subh;
		struct pfsync_clr clr;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

#ifdef __FreeBSD__
	PF_ASSERT(MA_OWNED);
#else
	splassert(IPL_SOFTNET);
#endif

	if (sc == NULL)
		return;

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_CLR;
	r.subh.count = htons(1);

	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
	r.clr.creatorid = creatorid;

	pfsync_send_plus(&r, sizeof(r));
}
2792
/*
 * Put state 'st' on message queue 'q' (one of the PFSYNC_S_* queues) and
 * grow the pending-packet length accordingly.  The state must not already
 * be on a queue.  If the addition would overflow the MTU, the pending
 * packet is transmitted first.
 */
void
pfsync_q_ins(struct pf_state *st, int q)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = pfsync_qs[q].len;
	int s;

#ifdef __FreeBSD__
	KASSERT(st->sync_state == PFSYNC_S_NONE,
		("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state == PFSYNC_S_NONE);
#endif

/* Sanity check kept enabled unconditionally (note the "1 ||"). */
#if 1 || defined(PFSYNC_DEBUG)
	if (sc->sc_len < PFSYNC_MINPKT)
#ifdef __FreeBSD__
		panic("pfsync pkt len is too low %zu", sc->sc_len);
#else
		panic("pfsync pkt len is too low %d", sc->sc_len);
#endif
#endif
	/* First entry on this queue also needs a subheader. */
	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		nlen += sizeof(struct pfsync_subheader);

#ifdef __FreeBSD__
	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		pfsync_sendout();
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		splx(s);

		/* Packet was flushed; queue is empty again, so resize. */
		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
	}

	sc->sc_len += nlen;
	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
	st->sync_state = q;
}
2844
/*
 * Remove state 'st' from whatever message queue it is on and shrink the
 * pending-packet length, dropping the subheader too when the queue
 * becomes empty.  The state must currently be queued.
 */
void
pfsync_q_del(struct pf_state *st)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int q = st->sync_state;

#ifdef __FreeBSD__
	KASSERT(st->sync_state != PFSYNC_S_NONE,
		("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
#else
	KASSERT(st->sync_state != PFSYNC_S_NONE);
#endif

	sc->sc_len -= pfsync_qs[q].len;
	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
	st->sync_state = PFSYNC_S_NONE;

	if (TAILQ_EMPTY(&sc->sc_qs[q]))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}
2869
2870#ifdef notyet
/*
 * Queue an IPsec SA (tdb) replay-counter update for our peers.  A tdb is
 * put on the queue at most once (tracked via TDBF_PFSYNC); repeated calls
 * just bump tdb_updates and push the packet once sc_maxupdates is hit.
 * 'output' selects whether the outbound replay offset is applied on send.
 * (Compiled out: entire block is under "#ifdef notyet".)
 */
void
pfsync_update_tdb(struct tdb *t, int output)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	size_t nlen = sizeof(struct pfsync_tdb);
	int s;

	if (sc == NULL)
		return;

	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
		/* First tdb in the packet also needs a subheader. */
		if (TAILQ_EMPTY(&sc->sc_tdb_q))
			nlen += sizeof(struct pfsync_subheader);

		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
			s = splnet();
			pfsync_sendout();
			splx(s);

			/* Packet was flushed; start a fresh subheader. */
			nlen = sizeof(struct pfsync_subheader) +
			    sizeof(struct pfsync_tdb);
		}

		sc->sc_len += nlen;
		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
		SET(t->tdb_flags, TDBF_PFSYNC);
		t->tdb_updates = 0;
	} else {
		if (++t->tdb_updates >= sc->sc_maxupdates)
			schednetisr(NETISR_PFSYNC);
	}

	if (output)
		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
	else
		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
}
2912
/*
 * Remove a tdb from the pending sync queue (if it is queued) and shrink
 * the pending-packet length.  (Compiled out: under "#ifdef notyet".)
 */
void
pfsync_delete_tdb(struct tdb *t)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
		return;

	sc->sc_len -= sizeof(struct pfsync_tdb);
	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
	CLR(t->tdb_flags, TDBF_PFSYNC);

	if (TAILQ_EMPTY(&sc->sc_tdb_q))
		sc->sc_len -= sizeof(struct pfsync_subheader);
}
2932
/*
 * Serialize tdb 't' into the outgoing mbuf at 'offset' as a pfsync_tdb
 * record; returns the number of bytes written.  The replay counter is
 * padded for outbound SAs (see comment below).  (Under "#ifdef notyet".)
 */
int
pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
{
	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);

	bzero(ut, sizeof(*ut));
	ut->spi = t->tdb_spi;
	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
	/*
	 * When a failover happens, the master's rpl is probably above
	 * what we see here (we may be up to a second late), so
	 * increase it a bit for outbound tdbs to manage most such
	 * situations.
	 *
	 * For now, just add an offset that is likely to be larger
	 * than the number of packets we can see in one second. The RFC
	 * just says the next packet must have a higher seq value.
	 *
	 * XXX What is a good algorithm for this? We could use
	 * a rate-determined increase, but to know it, we would have
	 * to extend struct tdb.
	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
	 * will soon be replaced anyway. For now, just don't handle
	 * this edge case.
	 */
#define RPL_INCR 16384
	ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
	    RPL_INCR : 0));
	ut->cur_bytes = htobe64(t->tdb_cur_bytes);
	ut->sproto = t->tdb_sproto;

	return (sizeof(*ut));
}
2966#endif
2967
/*
 * Begin servicing a peer's bulk update request: remember where the walk
 * of the global state list starts (so we know when we have wrapped all
 * the way around), announce PFSYNC_BUS_START, and kick off the first
 * round of pfsync_bulk_update().
 *
 * NOTE(review): if the state list is empty, sc_bulk_next and sc_bulk_last
 * remain NULL here — verify pfsync_bulk_update() tolerates that before
 * relying on this path.
 */
void
pfsync_bulk_start(void)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	sc->sc_ureq_received = time_uptime;

	if (sc->sc_bulk_next == NULL)
#ifdef __FreeBSD__
		sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
#else
		sc->sc_bulk_next = TAILQ_FIRST(&state_list);
#endif
	sc->sc_bulk_last = sc->sc_bulk_next;

#ifdef __FreeBSD__
	if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
	if (pf_status.debug >= PF_DEBUG_MISC)
#endif
		printf("pfsync: received bulk update request\n");

	pfsync_bulk_status(PFSYNC_BUS_START);
	pfsync_bulk_update(sc);
}
2997
2998void
2999pfsync_bulk_update(void *arg)
3000{
3001	struct pfsync_softc *sc = arg;
3002	struct pf_state *st = sc->sc_bulk_next;
3003	int i = 0;
3004	int s;
3005
3006	s = splsoftnet();
3007#ifdef __FreeBSD__
3008	CURVNET_SET(sc->sc_ifp->if_vnet);
3009	PF_LOCK();
3010#endif
3011	do {
3012		if (st->sync_state == PFSYNC_S_NONE &&
3013		    st->timeout < PFTM_MAX &&
3014		    st->pfsync_time <= sc->sc_ureq_received) {
3015			pfsync_update_state_req(st);
3016			i++;
3017		}
3018
3019		st = TAILQ_NEXT(st, entry_list);
3020		if (st == NULL)
3021#ifdef __FreeBSD__
3022			st = TAILQ_FIRST(&V_state_list);
3023#else
3024			st = TAILQ_FIRST(&state_list);
3025#endif
3026
3027		if (i > 0 && TAILQ_EMPTY(&sc->sc_qs[PFSYNC_S_UPD])) {
3028			sc->sc_bulk_next = st;
3029#ifdef __FreeBSD__
3030			callout_reset(&sc->sc_bulk_tmo, 1,
3031			    pfsync_bulk_fail, sc);
3032#else
3033			timeout_add(&sc->sc_bulk_tmo, 1);
3034#endif
3035			goto out;
3036		}
3037	} while (st != sc->sc_bulk_last);
3038
3039	/* we're done */
3040	sc->sc_bulk_next = NULL;
3041	sc->sc_bulk_last = NULL;
3042	pfsync_bulk_status(PFSYNC_BUS_END);
3043
3044out:
3045#ifdef __FreeBSD__
3046	PF_UNLOCK();
3047	CURVNET_RESTORE();
3048#endif
3049	splx(s);
3050}
3051
/*
 * Send a bulk-update status message (start/end) to our peers, carrying
 * our hostid and the elapsed time since the bulk request was received.
 */
void
pfsync_bulk_status(u_int8_t status)
{
	/* On-wire message: subheader followed by a single bus record. */
	struct {
		struct pfsync_subheader subh;
		struct pfsync_bus bus;
	} __packed r;

#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif

	bzero(&r, sizeof(r));

	r.subh.action = PFSYNC_ACT_BUS;
	r.subh.count = htons(1);

#ifdef __FreeBSD__
	r.bus.creatorid = V_pf_status.hostid;
#else
	r.bus.creatorid = pf_status.hostid;
#endif
	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
	r.bus.status = status;

	pfsync_send_plus(&r, sizeof(r));
}
3081
/*
 * Timeout handler: our bulk update request went unanswered.  Retry up to
 * PFSYNC_MAX_BULKTRIES times (5s apart); after that, give up and declare
 * ourselves in sync so the box can come up on its own.
 */
void
pfsync_bulk_fail(void *arg)
{
	struct pfsync_softc *sc = arg;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
		/* Try again */
#ifdef __FreeBSD__
		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
		    pfsync_bulk_fail, V_pfsyncif);
#else
		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
#endif
		/* (0, 0) means "resend everything". */
		pfsync_request_update(0, 0);
	} else {
		/* Pretend like the transfer was ok */
		sc->sc_ureq_sent = 0;
		sc->sc_bulk_tries = 0;
#if NCARP > 0
#ifdef notyet
#ifdef __FreeBSD__
		if (!sc->pfsync_sync_ok)
#else
		if (!pfsync_sync_ok)
#endif
			carp_group_demote_adj(&sc->sc_if, -1);
#endif
#endif
#ifdef __FreeBSD__
		sc->pfsync_sync_ok = 1;
#else
		pfsync_sync_ok = 1;
#endif
#ifdef __FreeBSD__
		if (V_pf_status.debug >= PF_DEBUG_MISC)
#else
		if (pf_status.debug >= PF_DEBUG_MISC)
#endif
			printf("pfsync: failed to receive bulk update\n");
	}

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}
3131
/*
 * Attach an extra pre-built message ('plus', 'pluslen' bytes) to the
 * pending packet and transmit immediately.  If the pending packet is
 * already too full, it is flushed first.  The buffer must stay valid
 * until pfsync_sendout() returns; sc_plus points directly at it.
 */
void
pfsync_send_plus(void *plus, size_t pluslen)
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = V_pfsyncif;
#else
	struct pfsync_softc *sc = pfsyncif;
#endif
	int s;

#ifdef __FreeBSD__
	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
#else
	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
#endif
		s = splnet();
#ifdef __FreeBSD__
		PF_LOCK();
#endif
		pfsync_sendout();
#ifdef __FreeBSD__
		PF_UNLOCK();
#endif
		splx(s);
	}

	sc->sc_plus = plus;
	sc->sc_len += (sc->sc_pluslen = pluslen);

	s = splnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);
}
3171
3172int
3173pfsync_up(void)
3174{
3175#ifdef __FreeBSD__
3176	struct pfsync_softc *sc = V_pfsyncif;
3177#else
3178	struct pfsync_softc *sc = pfsyncif;
3179#endif
3180
3181#ifdef __FreeBSD__
3182	if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
3183#else
3184	if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
3185#endif
3186		return (0);
3187
3188	return (1);
3189}
3190
3191int
3192pfsync_state_in_use(struct pf_state *st)
3193{
3194#ifdef __FreeBSD__
3195	struct pfsync_softc *sc = V_pfsyncif;
3196#else
3197	struct pfsync_softc *sc = pfsyncif;
3198#endif
3199
3200	if (sc == NULL)
3201		return (0);
3202
3203	if (st->sync_state != PFSYNC_S_NONE)
3204		return (1);
3205
3206	if (sc->sc_bulk_next == NULL && sc->sc_bulk_last == NULL)
3207		return (0);
3208
3209	return (1);
3210}
3211
/* Debug counters: softintr invocations and push-timer expirations. */
u_int pfsync_ints;
u_int pfsync_tmos;
3214
/*
 * One-second push timer: flush whatever messages have accumulated.
 * 'sc' is only declared under VIMAGE because CURVNET_SET() expands to
 * nothing otherwise and the variable would be unused.
 */
void
pfsync_timeout(void *arg)
{
#if defined(__FreeBSD__) && defined(VIMAGE)
	struct pfsync_softc *sc = arg;
#endif
	int s;

#ifdef __FreeBSD__
	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif

	pfsync_tmos++;

	s = splnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}
3243
/*
 * Softnet/netisr handler: transmit the pending pfsync packet.  On
 * FreeBSD this is a SWI registered in vnet_pfsync_init() with the
 * softc as its argument.
 */
void
#ifdef __FreeBSD__
pfsyncintr(void *arg)
#else
pfsyncintr(void)
#endif
{
#ifdef __FreeBSD__
	struct pfsync_softc *sc = arg;
#endif
	int s;

#ifdef __FreeBSD__
	if (sc == NULL)
		return;

	CURVNET_SET(sc->sc_ifp->if_vnet);
#endif
	pfsync_ints++;

	s = splnet();
#ifdef __FreeBSD__
	PF_LOCK();
#endif
	pfsync_sendout();
#ifdef __FreeBSD__
	PF_UNLOCK();
#endif
	splx(s);

#ifdef __FreeBSD__
	CURVNET_RESTORE();
#endif
}
3279
/*
 * sysctl handler for pfsync statistics (OpenBSD-style interface).  The
 * body is compiled out ("#ifdef notyet"), so this currently always
 * returns ENOPROTOOPT.
 */
int
pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

#ifdef notyet
	/* All sysctl names at this level are terminal. */
	if (namelen != 1)
		return (ENOTDIR);

	switch (name[0]) {
	case PFSYNCCTL_STATS:
		if (newp != NULL)
			return (EPERM);
		return (sysctl_struct(oldp, oldlenp, newp, newlen,
		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
	}
#endif
	return (ENOPROTOOPT);
}
3300
3301#ifdef __FreeBSD__
/*
 * ifnet departure event handler: if the interface we sync over goes
 * away, detach from it and drop our multicast membership bookkeeping.
 */
void
pfsync_ifdetach(void *arg, struct ifnet *ifp)
{
	struct pfsync_softc *sc = (struct pfsync_softc *)arg;
	struct ip_moptions *imo;

	if (sc == NULL || sc->sc_sync_if != ifp)
		return;         /* not for us; unlocked read */

	CURVNET_SET(sc->sc_ifp->if_vnet);

	PF_LOCK();

	/* Deal with a member interface going away from under us. */
	sc->sc_sync_if = NULL;
	imo = &sc->sc_imo;
	if (imo->imo_num_memberships > 0) {
		KASSERT(imo->imo_num_memberships == 1,
		    ("%s: imo_num_memberships != 1", __func__));
		/*
		 * Our event handler is always called after protocol
		 * domains have been detached from the underlying ifnet.
		 * Do not call in_delmulti(); we held a single reference
		 * which the protocol domain has purged in in_purgemaddrs().
		 */
		PF_UNLOCK();
		imo->imo_membership[--imo->imo_num_memberships] = NULL;
		PF_LOCK();
		imo->imo_multicast_ifp = NULL;
	}

	PF_UNLOCK();

	CURVNET_RESTORE();
}
3337
/*
 * Per-vnet initialization: attach the pfsync interface, register the
 * transmit SWI, and publish our entry points to pf through the
 * pfsync_*_ptr function-pointer hooks.
 */
static int
vnet_pfsync_init(const void *unused)
{
	int error = 0;

	pfsyncattach(0);

	error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
		SWI_NET, INTR_MPSAFE, &pfsync_swi.pfsync_swi_cookie);
	if (error)
		panic("%s: swi_add %d", __func__, error);

	/* Hook pfsync into pf's indirection table. */
	pfsync_state_import_ptr = pfsync_state_import;
	pfsync_up_ptr = pfsync_up;
	pfsync_insert_state_ptr = pfsync_insert_state;
	pfsync_update_state_ptr = pfsync_update_state;
	pfsync_delete_state_ptr = pfsync_delete_state;
	pfsync_clear_states_ptr = pfsync_clear_states;
	pfsync_state_in_use_ptr = pfsync_state_in_use;
	pfsync_defer_ptr = pfsync_defer;

	return (0);
}
3361
/*
 * Per-vnet teardown: remove the SWI, unhook all pf entry points, and
 * detach the interface cloner.  Mirrors vnet_pfsync_init() in reverse.
 */
static int
vnet_pfsync_uninit(const void *unused)
{

	swi_remove(pfsync_swi.pfsync_swi_cookie);

	pfsync_state_import_ptr = NULL;
	pfsync_up_ptr = NULL;
	pfsync_insert_state_ptr = NULL;
	pfsync_update_state_ptr = NULL;
	pfsync_delete_state_ptr = NULL;
	pfsync_clear_states_ptr = NULL;
	pfsync_state_in_use_ptr = NULL;
	pfsync_defer_ptr = NULL;

	if_clone_detach(&pfsync_cloner);

	return (0);
}
3381
/* Define startup order. */
#define	PFSYNC_SYSINIT_ORDER	SI_SUB_PROTO_BEGIN
#define	PFSYNC_MODEVENT_ORDER	(SI_ORDER_FIRST) /* On boot slot in here. */
#define	PFSYNC_VNET_ORDER	(PFSYNC_MODEVENT_ORDER + 2) /* Later still. */

/*
 * Starting up.
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 */
VNET_SYSINIT(vnet_pfsync_init, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
    vnet_pfsync_init, NULL);

/*
 * Closing up shop. These are done in REVERSE ORDER,
 * Not called on reboot.
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfsync_uninit, PFSYNC_SYSINIT_ORDER, PFSYNC_VNET_ORDER,
    vnet_pfsync_uninit, NULL);
3401static int
3402pfsync_modevent(module_t mod, int type, void *data)
3403{
3404	int error = 0;
3405
3406	switch (type) {
3407	case MOD_LOAD:
3408#ifndef __FreeBSD__
3409		pfsyncattach(0);
3410#endif
3411		break;
3412	case MOD_UNLOAD:
3413#ifndef __FreeBSD__
3414		if_clone_detach(&pfsync_cloner);
3415#endif
3416		break;
3417	default:
3418		error = EINVAL;
3419		break;
3420	}
3421
3422	return error;
3423}
3424
/* Kernel module glue: registration, version, and dependency on pf. */
static moduledata_t pfsync_mod = {
	"pfsync",
	pfsync_modevent,
	0
};

#define PFSYNC_MODVER 1

DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY);
MODULE_VERSION(pfsync, PFSYNC_MODVER);
MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
3436#endif /* __FreeBSD__ */
3437