if_pfsync.c revision 229961
1/*	$OpenBSD: if_pfsync.c,v 1.110 2009/02/24 05:39:19 dlg Exp $	*/
2
3/*
4 * Copyright (c) 2002 Michael Shalayeff
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29/*
30 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
31 *
32 * Permission to use, copy, modify, and distribute this software for any
33 * purpose with or without fee is hereby granted, provided that the above
34 * copyright notice and this permission notice appear in all copies.
35 *
36 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
37 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
38 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
39 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
40 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
41 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
42 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
43 */
44
45/*
46 * Revisions picked from OpenBSD after revision 1.110 import:
47 * 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
48 * 1.120, 1.175 - use monotonic time_uptime
49 * 1.122 - reduce number of updates for non-TCP sessions
50 * 1.128 - cleanups
51 * 1.170 - SIOCSIFMTU checks
52 */
53
54#ifdef __FreeBSD__
55#include "opt_inet.h"
56#include "opt_inet6.h"
57#include "opt_pf.h"
58
59#include <sys/cdefs.h>
60__FBSDID("$FreeBSD: head/sys/contrib/pf/net/if_pfsync.c 229961 2012-01-11 14:11:10Z glebius $");
61
62#define	NBPFILTER	1
63#endif /* __FreeBSD__ */
64
65#include <sys/param.h>
66#include <sys/kernel.h>
67#ifdef __FreeBSD__
68#include <sys/bus.h>
69#include <sys/interrupt.h>
70#include <sys/priv.h>
71#endif
72#include <sys/proc.h>
73#include <sys/systm.h>
74#include <sys/time.h>
75#include <sys/mbuf.h>
76#include <sys/socket.h>
77#ifdef __FreeBSD__
78#include <sys/endian.h>
79#include <sys/malloc.h>
80#include <sys/module.h>
81#include <sys/sockio.h>
82#include <sys/taskqueue.h>
83#include <sys/lock.h>
84#include <sys/mutex.h>
85#include <sys/protosw.h>
86#else
87#include <sys/ioctl.h>
88#include <sys/timeout.h>
89#endif
90#include <sys/sysctl.h>
91#ifndef __FreeBSD__
92#include <sys/pool.h>
93#endif
94
95#include <net/if.h>
96#ifdef __FreeBSD__
97#include <net/if_clone.h>
98#endif
99#include <net/if_types.h>
100#include <net/route.h>
101#include <net/bpf.h>
102#include <net/netisr.h>
103#ifdef __FreeBSD__
104#include <net/vnet.h>
105#endif
106
107#include <netinet/in.h>
108#include <netinet/if_ether.h>
109#include <netinet/tcp.h>
110#include <netinet/tcp_seq.h>
111
112#ifdef	INET
113#include <netinet/in_systm.h>
114#include <netinet/in_var.h>
115#include <netinet/ip.h>
116#include <netinet/ip_var.h>
117#endif
118
119#ifdef INET6
120#include <netinet6/nd6.h>
121#endif /* INET6 */
122
123#ifdef __FreeBSD__
124#include <netinet/ip_carp.h>
125#else
126#include "carp.h"
127#if NCARP > 0
128#include <netinet/ip_carp.h>
129#endif
130#endif
131
132#include <net/pfvar.h>
133#include <net/if_pfsync.h>
134
135#ifndef __FreeBSD__
136#include "bpfilter.h"
137#include "pfsync.h"
138#endif
139
140#define PFSYNC_MINPKT ( \
141	sizeof(struct ip) + \
142	sizeof(struct pfsync_header) + \
143	sizeof(struct pfsync_subheader) + \
144	sizeof(struct pfsync_eof))
145
146struct pfsync_pkt {
147	struct ip *ip;
148	struct in_addr src;
149	u_int8_t flags;
150};
151
152int	pfsync_input_hmac(struct mbuf *, int);
153
154int	pfsync_upd_tcp(struct pf_state *, struct pfsync_state_peer *,
155	    struct pfsync_state_peer *);
156
157int	pfsync_in_clr(struct pfsync_pkt *, struct mbuf *, int, int);
158int	pfsync_in_ins(struct pfsync_pkt *, struct mbuf *, int, int);
159int	pfsync_in_iack(struct pfsync_pkt *, struct mbuf *, int, int);
160int	pfsync_in_upd(struct pfsync_pkt *, struct mbuf *, int, int);
161int	pfsync_in_upd_c(struct pfsync_pkt *, struct mbuf *, int, int);
162int	pfsync_in_ureq(struct pfsync_pkt *, struct mbuf *, int, int);
163int	pfsync_in_del(struct pfsync_pkt *, struct mbuf *, int, int);
164int	pfsync_in_del_c(struct pfsync_pkt *, struct mbuf *, int, int);
165int	pfsync_in_bus(struct pfsync_pkt *, struct mbuf *, int, int);
166int	pfsync_in_tdb(struct pfsync_pkt *, struct mbuf *, int, int);
167int	pfsync_in_eof(struct pfsync_pkt *, struct mbuf *, int, int);
168
169int	pfsync_in_error(struct pfsync_pkt *, struct mbuf *, int, int);
170
171int	(*pfsync_acts[])(struct pfsync_pkt *, struct mbuf *, int, int) = {
172	pfsync_in_clr,			/* PFSYNC_ACT_CLR */
173	pfsync_in_ins,			/* PFSYNC_ACT_INS */
174	pfsync_in_iack,			/* PFSYNC_ACT_INS_ACK */
175	pfsync_in_upd,			/* PFSYNC_ACT_UPD */
176	pfsync_in_upd_c,		/* PFSYNC_ACT_UPD_C */
177	pfsync_in_ureq,			/* PFSYNC_ACT_UPD_REQ */
178	pfsync_in_del,			/* PFSYNC_ACT_DEL */
179	pfsync_in_del_c,		/* PFSYNC_ACT_DEL_C */
180	pfsync_in_error,		/* PFSYNC_ACT_INS_F */
181	pfsync_in_error,		/* PFSYNC_ACT_DEL_F */
182	pfsync_in_bus,			/* PFSYNC_ACT_BUS */
183	pfsync_in_tdb,			/* PFSYNC_ACT_TDB */
184	pfsync_in_eof			/* PFSYNC_ACT_EOF */
185};
186
187struct pfsync_q {
188	int		(*write)(struct pf_state *, struct mbuf *, int);
189	size_t		len;
190	u_int8_t	action;
191};
192
193/* we have one of these for every PFSYNC_S_ */
194int	pfsync_out_state(struct pf_state *, struct mbuf *, int);
195int	pfsync_out_iack(struct pf_state *, struct mbuf *, int);
196int	pfsync_out_upd_c(struct pf_state *, struct mbuf *, int);
197int	pfsync_out_del(struct pf_state *, struct mbuf *, int);
198
199struct pfsync_q pfsync_qs[] = {
200	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_INS },
201	{ pfsync_out_iack,  sizeof(struct pfsync_ins_ack), PFSYNC_ACT_INS_ACK },
202	{ pfsync_out_state, sizeof(struct pfsync_state),   PFSYNC_ACT_UPD },
203	{ pfsync_out_upd_c, sizeof(struct pfsync_upd_c),   PFSYNC_ACT_UPD_C },
204	{ pfsync_out_del,   sizeof(struct pfsync_del_c),   PFSYNC_ACT_DEL_C }
205};
206
207void	pfsync_q_ins(struct pf_state *, int);
208void	pfsync_q_del(struct pf_state *);
209
210struct pfsync_upd_req_item {
211	TAILQ_ENTRY(pfsync_upd_req_item)	ur_entry;
212	struct pfsync_upd_req			ur_msg;
213};
214TAILQ_HEAD(pfsync_upd_reqs, pfsync_upd_req_item);
215
216struct pfsync_deferral {
217	TAILQ_ENTRY(pfsync_deferral)		 pd_entry;
218	struct pf_state				*pd_st;
219	struct mbuf				*pd_m;
220#ifdef __FreeBSD__
221	struct callout				 pd_tmo;
222#else
223	struct timeout				 pd_tmo;
224#endif
225};
226TAILQ_HEAD(pfsync_deferrals, pfsync_deferral);
227
228#define PFSYNC_PLSIZE	MAX(sizeof(struct pfsync_upd_req_item), \
229			    sizeof(struct pfsync_deferral))
230
231#ifdef notyet
232int	pfsync_out_tdb(struct tdb *, struct mbuf *, int);
233#endif
234
235struct pfsync_softc {
236#ifdef __FreeBSD__
237	struct ifnet		*sc_ifp;
238#else
239	struct ifnet		 sc_if;
240#endif
241	struct ifnet		*sc_sync_if;
242
243#ifdef __FreeBSD__
244	uma_zone_t		 sc_pool;
245#else
246	struct pool		 sc_pool;
247#endif
248
249	struct ip_moptions	 sc_imo;
250
251	struct in_addr		 sc_sync_peer;
252	u_int8_t		 sc_maxupdates;
253#ifdef __FreeBSD__
254	int			 pfsync_sync_ok;
255#endif
256
257	struct ip		 sc_template;
258
259	struct pf_state_queue	 sc_qs[PFSYNC_S_COUNT];
260	size_t			 sc_len;
261
262	struct pfsync_upd_reqs	 sc_upd_req_list;
263
264	struct pfsync_deferrals	 sc_deferrals;
265	u_int			 sc_deferred;
266
267	void			*sc_plus;
268	size_t			 sc_pluslen;
269
270	u_int32_t		 sc_ureq_sent;
271	int			 sc_bulk_tries;
272#ifdef __FreeBSD__
273	struct callout		 sc_bulkfail_tmo;
274#else
275	struct timeout		 sc_bulkfail_tmo;
276#endif
277
278	u_int32_t		 sc_ureq_received;
279	struct pf_state		*sc_bulk_next;
280	struct pf_state		*sc_bulk_last;
281#ifdef __FreeBSD__
282	struct callout		 sc_bulk_tmo;
283#else
284	struct timeout		 sc_bulk_tmo;
285#endif
286
287	TAILQ_HEAD(, tdb)	 sc_tdb_q;
288
289#ifdef __FreeBSD__
290	struct callout		 sc_tmo;
291#else
292	struct timeout		 sc_tmo;
293#endif
294};
295
296#ifdef __FreeBSD__
297static MALLOC_DEFINE(M_PFSYNC, "pfsync", "pfsync data");
298static VNET_DEFINE(struct pfsync_softc	*, pfsyncif) = NULL;
299#define	V_pfsyncif		VNET(pfsyncif)
300static VNET_DEFINE(void *, pfsync_swi_cookie) = NULL;
301#define	V_pfsync_swi_cookie	VNET(pfsync_swi_cookie)
302static VNET_DEFINE(struct pfsyncstats, pfsyncstats);
303#define	V_pfsyncstats		VNET(pfsyncstats)
304static VNET_DEFINE(int, pfsync_carp_adj) = CARP_MAXSKEW;
305#define	V_pfsync_carp_adj	VNET(pfsync_carp_adj)
306
307static void	pfsyncintr(void *);
308static int	pfsync_multicast_setup(struct pfsync_softc *);
309static void	pfsync_multicast_cleanup(struct pfsync_softc *);
310static int	pfsync_init(void);
311static void	pfsync_uninit(void);
312
313SYSCTL_NODE(_net, OID_AUTO, pfsync, CTLFLAG_RW, 0, "PFSYNC");
314SYSCTL_VNET_STRUCT(_net_pfsync, OID_AUTO, stats, CTLFLAG_RW,
315    &VNET_NAME(pfsyncstats), pfsyncstats,
316    "PFSYNC statistics (struct pfsyncstats, net/if_pfsync.h)");
317SYSCTL_INT(_net_pfsync, OID_AUTO, carp_demotion_factor, CTLFLAG_RW,
318    &VNET_NAME(pfsync_carp_adj), 0, "pfsync's CARP demotion factor adjustment");
319#else
320struct pfsync_softc	*pfsyncif = NULL;
321struct pfsyncstats	 pfsyncstats;
322#define	V_pfsyncstats	 pfsyncstats
323#endif
324
325void	pfsyncattach(int);
326#ifdef __FreeBSD__
327int	pfsync_clone_create(struct if_clone *, int, caddr_t);
328void	pfsync_clone_destroy(struct ifnet *);
329#else
330int	pfsync_clone_create(struct if_clone *, int);
331int	pfsync_clone_destroy(struct ifnet *);
332#endif
333int	pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
334	    struct pf_state_peer *);
335void	pfsync_update_net_tdb(struct pfsync_tdb *);
336int	pfsyncoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
337#ifdef __FreeBSD__
338	    struct route *);
339#else
340	    struct rtentry *);
341#endif
342int	pfsyncioctl(struct ifnet *, u_long, caddr_t);
343void	pfsyncstart(struct ifnet *);
344
345struct mbuf *pfsync_if_dequeue(struct ifnet *);
346
347void	pfsync_deferred(struct pf_state *, int);
348void	pfsync_undefer(struct pfsync_deferral *, int);
349void	pfsync_defer_tmo(void *);
350
351void	pfsync_request_update(u_int32_t, u_int64_t);
352void	pfsync_update_state_req(struct pf_state *);
353
354void	pfsync_drop(struct pfsync_softc *);
355void	pfsync_sendout(void);
356void	pfsync_send_plus(void *, size_t);
357void	pfsync_timeout(void *);
358void	pfsync_tdb_timeout(void *);
359
360void	pfsync_bulk_start(void);
361void	pfsync_bulk_status(u_int8_t);
362void	pfsync_bulk_update(void *);
363void	pfsync_bulk_fail(void *);
364
365#ifdef __FreeBSD__
366/* XXX: ugly */
367#define	betoh64		(unsigned long long)be64toh
368#define	timeout_del	callout_stop
369#endif
370
371#define PFSYNC_MAX_BULKTRIES	12
372#ifndef __FreeBSD__
373int	pfsync_sync_ok;
374#endif
375
376#ifdef __FreeBSD__
377VNET_DEFINE(struct ifc_simple_data, pfsync_cloner_data);
378VNET_DEFINE(struct if_clone, pfsync_cloner);
379#define	V_pfsync_cloner_data	VNET(pfsync_cloner_data)
380#define	V_pfsync_cloner		VNET(pfsync_cloner)
381IFC_SIMPLE_DECLARE(pfsync, 1);
382#else
383struct if_clone	pfsync_cloner =
384    IF_CLONE_INITIALIZER("pfsync", pfsync_clone_create, pfsync_clone_destroy);
385#endif
386
387void
388pfsyncattach(int npfsync)
389{
390	if_clone_attach(&pfsync_cloner);
391}
392int
393#ifdef __FreeBSD__
394pfsync_clone_create(struct if_clone *ifc, int unit, caddr_t param)
395#else
396pfsync_clone_create(struct if_clone *ifc, int unit)
397#endif
398{
399	struct pfsync_softc *sc;
400	struct ifnet *ifp;
401	int q;
402
403	if (unit != 0)
404		return (EINVAL);
405
406#ifdef __FreeBSD__
407	sc = malloc(sizeof(struct pfsync_softc), M_PFSYNC, M_WAITOK | M_ZERO);
408	sc->pfsync_sync_ok = 1;
409#else
410	pfsync_sync_ok = 1;
411	sc = malloc(sizeof(*pfsyncif), M_DEVBUF, M_NOWAIT | M_ZERO);
412#endif
413
414	for (q = 0; q < PFSYNC_S_COUNT; q++)
415		TAILQ_INIT(&sc->sc_qs[q]);
416
417#ifdef __FreeBSD__
418	sc->sc_pool = uma_zcreate("pfsync", PFSYNC_PLSIZE, NULL, NULL, NULL,
419	    NULL, UMA_ALIGN_PTR, 0);
420#else
421	pool_init(&sc->sc_pool, PFSYNC_PLSIZE, 0, 0, 0, "pfsync", NULL);
422#endif
423	TAILQ_INIT(&sc->sc_upd_req_list);
424	TAILQ_INIT(&sc->sc_deferrals);
425	sc->sc_deferred = 0;
426
427	TAILQ_INIT(&sc->sc_tdb_q);
428
429	sc->sc_len = PFSYNC_MINPKT;
430	sc->sc_maxupdates = 128;
431
432#ifndef __FreeBSD__
433	sc->sc_imo.imo_membership = (struct in_multi **)malloc(
434	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_IPMOPTS,
435	    M_WAITOK | M_ZERO);
436	sc->sc_imo.imo_max_memberships = IP_MIN_MEMBERSHIPS;
437#endif
438
439#ifdef __FreeBSD__
440	ifp = sc->sc_ifp = if_alloc(IFT_PFSYNC);
441	if (ifp == NULL) {
442		uma_zdestroy(sc->sc_pool);
443		free(sc, M_PFSYNC);
444		return (ENOSPC);
445	}
446	if_initname(ifp, ifc->ifc_name, unit);
447#else
448	ifp = &sc->sc_if;
449	snprintf(ifp->if_xname, sizeof ifp->if_xname, "pfsync%d", unit);
450#endif
451	ifp->if_softc = sc;
452	ifp->if_ioctl = pfsyncioctl;
453	ifp->if_output = pfsyncoutput;
454	ifp->if_start = pfsyncstart;
455	ifp->if_type = IFT_PFSYNC;
456	ifp->if_snd.ifq_maxlen = ifqmaxlen;
457	ifp->if_hdrlen = sizeof(struct pfsync_header);
458	ifp->if_mtu = ETHERMTU;
459#ifdef __FreeBSD__
460	callout_init(&sc->sc_tmo, CALLOUT_MPSAFE);
461	callout_init_mtx(&sc->sc_bulk_tmo, &pf_task_mtx, 0);
462	callout_init(&sc->sc_bulkfail_tmo, CALLOUT_MPSAFE);
463#else
464	timeout_set(&sc->sc_tmo, pfsync_timeout, sc);
465	timeout_set(&sc->sc_bulk_tmo, pfsync_bulk_update, sc);
466	timeout_set(&sc->sc_bulkfail_tmo, pfsync_bulk_fail, sc);
467#endif
468
469	if_attach(ifp);
470#ifndef __FreeBSD__
471	if_alloc_sadl(ifp);
472
473#if NCARP > 0
474	if_addgroup(ifp, "carp");
475#endif
476#endif
477
478#if NBPFILTER > 0
479#ifdef __FreeBSD__
480	bpfattach(ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
481#else
482	bpfattach(&sc->sc_if.if_bpf, ifp, DLT_PFSYNC, PFSYNC_HDRLEN);
483#endif
484#endif
485
486#ifdef __FreeBSD__
487	V_pfsyncif = sc;
488#else
489	pfsyncif = sc;
490#endif
491
492	return (0);
493}
494
495#ifdef __FreeBSD__
496void
497#else
498int
499#endif
500pfsync_clone_destroy(struct ifnet *ifp)
501{
502	struct pfsync_softc *sc = ifp->if_softc;
503
504#ifdef __FreeBSD__
505	PF_LOCK();
506#endif
507	timeout_del(&sc->sc_bulkfail_tmo);
508	timeout_del(&sc->sc_bulk_tmo);
509	timeout_del(&sc->sc_tmo);
510#ifdef __FreeBSD__
511	PF_UNLOCK();
512	if (!sc->pfsync_sync_ok && carp_demote_adj_p)
513		(*carp_demote_adj_p)(-V_pfsync_carp_adj, "pfsync destroy");
514#else
515#if NCARP > 0
516	if (!pfsync_sync_ok)
517		carp_group_demote_adj(&sc->sc_if, -1);
518#endif
519#endif
520#if NBPFILTER > 0
521	bpfdetach(ifp);
522#endif
523	if_detach(ifp);
524
525	pfsync_drop(sc);
526
527	while (sc->sc_deferred > 0)
528		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
529
530#ifdef __FreeBSD__
531	UMA_DESTROY(sc->sc_pool);
532#else
533	pool_destroy(&sc->sc_pool);
534#endif
535#ifdef __FreeBSD__
536	if_free(ifp);
537	if (sc->sc_imo.imo_membership)
538		pfsync_multicast_cleanup(sc);
539	free(sc, M_PFSYNC);
540#else
541	free(sc->sc_imo.imo_membership, M_IPMOPTS);
542	free(sc, M_DEVBUF);
543#endif
544
545#ifdef __FreeBSD__
546	V_pfsyncif = NULL;
547#else
548	pfsyncif = NULL;
549#endif
550
551#ifndef __FreeBSD__
552	return (0);
553#endif
554}
555
556struct mbuf *
557pfsync_if_dequeue(struct ifnet *ifp)
558{
559	struct mbuf *m;
560#ifndef __FreeBSD__
561	int s;
562#endif
563
564#ifdef __FreeBSD__
565	IF_LOCK(&ifp->if_snd);
566	_IF_DROP(&ifp->if_snd);
567	_IF_DEQUEUE(&ifp->if_snd, m);
568	IF_UNLOCK(&ifp->if_snd);
569#else
570	s = splnet();
571	IF_DEQUEUE(&ifp->if_snd, m);
572	splx(s);
573#endif
574
575	return (m);
576}
577
578/*
579 * Start output on the pfsync interface.
580 */
581void
582pfsyncstart(struct ifnet *ifp)
583{
584	struct mbuf *m;
585
586	while ((m = pfsync_if_dequeue(ifp)) != NULL) {
587#ifndef __FreeBSD__
588		IF_DROP(&ifp->if_snd);
589#endif
590		m_freem(m);
591	}
592}
593
594int
595pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
596    struct pf_state_peer *d)
597{
598	if (s->scrub.scrub_flag && d->scrub == NULL) {
599#ifdef __FreeBSD__
600		d->scrub = pool_get(&V_pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
601#else
602		d->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT | PR_ZERO);
603#endif
604		if (d->scrub == NULL)
605			return (ENOMEM);
606	}
607
608	return (0);
609}
610
611#ifndef __FreeBSD__
612void
613pfsync_state_export(struct pfsync_state *sp, struct pf_state *st)
614{
615	bzero(sp, sizeof(struct pfsync_state));
616
617	/* copy from state key */
618	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
619	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
620	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
621	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
622	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
623	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
624	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
625	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
626	sp->proto = st->key[PF_SK_WIRE]->proto;
627	sp->af = st->key[PF_SK_WIRE]->af;
628
629	/* copy from state */
630	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
631	bcopy(&st->rt_addr, &sp->rt_addr, sizeof(sp->rt_addr));
632	sp->creation = htonl(time_uptime - st->creation);
633	sp->expire = pf_state_expires(st);
634	if (sp->expire <= time_second)
635		sp->expire = htonl(0);
636	else
637		sp->expire = htonl(sp->expire - time_second);
638
639	sp->direction = st->direction;
640	sp->log = st->log;
641	sp->timeout = st->timeout;
642	sp->state_flags = st->state_flags;
643	if (st->src_node)
644		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
645	if (st->nat_src_node)
646		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
647
648	bcopy(&st->id, &sp->id, sizeof(sp->id));
649	sp->creatorid = st->creatorid;
650	pf_state_peer_hton(&st->src, &sp->src);
651	pf_state_peer_hton(&st->dst, &sp->dst);
652
653	if (st->rule.ptr == NULL)
654		sp->rule = htonl(-1);
655	else
656		sp->rule = htonl(st->rule.ptr->nr);
657	if (st->anchor.ptr == NULL)
658		sp->anchor = htonl(-1);
659	else
660		sp->anchor = htonl(st->anchor.ptr->nr);
661	if (st->nat_rule.ptr == NULL)
662		sp->nat_rule = htonl(-1);
663	else
664		sp->nat_rule = htonl(st->nat_rule.ptr->nr);
665
666	pf_state_counter_hton(st->packets[0], sp->packets[0]);
667	pf_state_counter_hton(st->packets[1], sp->packets[1]);
668	pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
669	pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
670
671}
672#endif
673
674int
675pfsync_state_import(struct pfsync_state *sp, u_int8_t flags)
676{
677	struct pf_state	*st = NULL;
678	struct pf_state_key *skw = NULL, *sks = NULL;
679	struct pf_rule *r = NULL;
680	struct pfi_kif	*kif;
681	int pool_flags;
682	int error;
683
684	PF_LOCK_ASSERT();
685
686#ifdef __FreeBSD__
687	if (sp->creatorid == 0 && V_pf_status.debug >= PF_DEBUG_MISC) {
688#else
689	if (sp->creatorid == 0 && pf_status.debug >= PF_DEBUG_MISC) {
690#endif
691		printf("pfsync_state_import: invalid creator id:"
692		    " %08x\n", ntohl(sp->creatorid));
693		return (EINVAL);
694	}
695
696	if ((kif = pfi_kif_get(sp->ifname)) == NULL) {
697#ifdef __FreeBSD__
698		if (V_pf_status.debug >= PF_DEBUG_MISC)
699#else
700		if (pf_status.debug >= PF_DEBUG_MISC)
701#endif
702			printf("pfsync_state_import: "
703			    "unknown interface: %s\n", sp->ifname);
704		if (flags & PFSYNC_SI_IOCTL)
705			return (EINVAL);
706		return (0);	/* skip this state */
707	}
708
709	/*
710	 * If the ruleset checksums match or the state is coming from the ioctl,
711	 * it's safe to associate the state with the rule of that number.
712	 */
713	if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
714	    (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) && ntohl(sp->rule) <
715	    pf_main_ruleset.rules[PF_RULESET_FILTER].active.rcount)
716		r = pf_main_ruleset.rules[
717		    PF_RULESET_FILTER].active.ptr_array[ntohl(sp->rule)];
718	else
719#ifdef __FreeBSD__
720		r = &V_pf_default_rule;
721#else
722		r = &pf_default_rule;
723#endif
724
725	if ((r->max_states && r->states_cur >= r->max_states))
726		goto cleanup;
727
728#ifdef __FreeBSD__
729	if (flags & PFSYNC_SI_IOCTL)
730		pool_flags = PR_WAITOK | PR_ZERO;
731	else
732		pool_flags = PR_NOWAIT | PR_ZERO;
733
734	if ((st = pool_get(&V_pf_state_pl, pool_flags)) == NULL)
735		goto cleanup;
736#else
737	if (flags & PFSYNC_SI_IOCTL)
738		pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
739	else
740		pool_flags = PR_LIMITFAIL | PR_ZERO;
741
742	if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
743		goto cleanup;
744#endif
745
746	if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
747		goto cleanup;
748
749	if (PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
750	    &sp->key[PF_SK_STACK].addr[0], sp->af) ||
751	    PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
752	    &sp->key[PF_SK_STACK].addr[1], sp->af) ||
753	    sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
754	    sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1]) {
755		if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
756			goto cleanup;
757	} else
758		sks = skw;
759
760	/* allocate memory for scrub info */
761	if (pfsync_alloc_scrub_memory(&sp->src, &st->src) ||
762	    pfsync_alloc_scrub_memory(&sp->dst, &st->dst))
763		goto cleanup;
764
765	/* copy to state key(s) */
766	skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
767	skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
768	skw->port[0] = sp->key[PF_SK_WIRE].port[0];
769	skw->port[1] = sp->key[PF_SK_WIRE].port[1];
770	skw->proto = sp->proto;
771	skw->af = sp->af;
772	if (sks != skw) {
773		sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
774		sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
775		sks->port[0] = sp->key[PF_SK_STACK].port[0];
776		sks->port[1] = sp->key[PF_SK_STACK].port[1];
777		sks->proto = sp->proto;
778		sks->af = sp->af;
779	}
780
781	/* copy to state */
782	bcopy(&sp->rt_addr, &st->rt_addr, sizeof(st->rt_addr));
783	st->creation = time_uptime - ntohl(sp->creation);
784	st->expire = time_second;
785	if (sp->expire) {
786		/* XXX No adaptive scaling. */
787		st->expire -= r->timeout[sp->timeout] - ntohl(sp->expire);
788	}
789
790	st->expire = ntohl(sp->expire) + time_second;
791	st->direction = sp->direction;
792	st->log = sp->log;
793	st->timeout = sp->timeout;
794	st->state_flags = sp->state_flags;
795
796	bcopy(sp->id, &st->id, sizeof(st->id));
797	st->creatorid = sp->creatorid;
798	pf_state_peer_ntoh(&sp->src, &st->src);
799	pf_state_peer_ntoh(&sp->dst, &st->dst);
800
801	st->rule.ptr = r;
802	st->nat_rule.ptr = NULL;
803	st->anchor.ptr = NULL;
804	st->rt_kif = NULL;
805
806	st->pfsync_time = time_uptime;
807	st->sync_state = PFSYNC_S_NONE;
808
809	/* XXX when we have nat_rule/anchors, use STATE_INC_COUNTERS */
810	r->states_cur++;
811	r->states_tot++;
812
813	if (!ISSET(flags, PFSYNC_SI_IOCTL))
814		SET(st->state_flags, PFSTATE_NOSYNC);
815
816	if ((error = pf_state_insert(kif, skw, sks, st)) != 0) {
817		/* XXX when we have nat_rule/anchors, use STATE_DEC_COUNTERS */
818		r->states_cur--;
819		goto cleanup_state;
820	}
821
822	if (!ISSET(flags, PFSYNC_SI_IOCTL)) {
823		CLR(st->state_flags, PFSTATE_NOSYNC);
824		if (ISSET(st->state_flags, PFSTATE_ACK)) {
825			pfsync_q_ins(st, PFSYNC_S_IACK);
826#ifdef __FreeBSD__
827			pfsync_sendout();
828#else
829			schednetisr(NETISR_PFSYNC);
830#endif
831		}
832	}
833	CLR(st->state_flags, PFSTATE_ACK);
834
835	return (0);
836
837cleanup:
838	error = ENOMEM;
839	if (skw == sks)
840		sks = NULL;
841#ifdef __FreeBSD__
842	if (skw != NULL)
843		pool_put(&V_pf_state_key_pl, skw);
844	if (sks != NULL)
845		pool_put(&V_pf_state_key_pl, sks);
846#else
847	if (skw != NULL)
848		pool_put(&pf_state_key_pl, skw);
849	if (sks != NULL)
850		pool_put(&pf_state_key_pl, sks);
851#endif
852
853cleanup_state:	/* pf_state_insert frees the state keys */
854	if (st) {
855#ifdef __FreeBSD__
856		if (st->dst.scrub)
857			pool_put(&V_pf_state_scrub_pl, st->dst.scrub);
858		if (st->src.scrub)
859			pool_put(&V_pf_state_scrub_pl, st->src.scrub);
860		pool_put(&V_pf_state_pl, st);
861#else
862		if (st->dst.scrub)
863			pool_put(&pf_state_scrub_pl, st->dst.scrub);
864		if (st->src.scrub)
865			pool_put(&pf_state_scrub_pl, st->src.scrub);
866		pool_put(&pf_state_pl, st);
867#endif
868	}
869	return (error);
870}
871
872void
873#ifdef __FreeBSD__
874pfsync_input(struct mbuf *m, __unused int off)
875#else
876pfsync_input(struct mbuf *m, ...)
877#endif
878{
879#ifdef __FreeBSD__
880	struct pfsync_softc *sc = V_pfsyncif;
881#else
882	struct pfsync_softc *sc = pfsyncif;
883#endif
884	struct pfsync_pkt pkt;
885	struct ip *ip = mtod(m, struct ip *);
886	struct pfsync_header *ph;
887	struct pfsync_subheader subh;
888
889	int offset;
890	int rv;
891
892	V_pfsyncstats.pfsyncs_ipackets++;
893
894	/* verify that we have a sync interface configured */
895#ifdef __FreeBSD__
896	if (!sc || !sc->sc_sync_if || !V_pf_status.running)
897#else
898	if (!sc || !sc->sc_sync_if || !pf_status.running)
899#endif
900		goto done;
901
902	/* verify that the packet came in on the right interface */
903	if (sc->sc_sync_if != m->m_pkthdr.rcvif) {
904		V_pfsyncstats.pfsyncs_badif++;
905		goto done;
906	}
907
908#ifdef __FreeBSD__
909	sc->sc_ifp->if_ipackets++;
910	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
911#else
912	sc->sc_if.if_ipackets++;
913	sc->sc_if.if_ibytes += m->m_pkthdr.len;
914#endif
915	/* verify that the IP TTL is 255. */
916	if (ip->ip_ttl != PFSYNC_DFLTTL) {
917		V_pfsyncstats.pfsyncs_badttl++;
918		goto done;
919	}
920
921	offset = ip->ip_hl << 2;
922	if (m->m_pkthdr.len < offset + sizeof(*ph)) {
923		V_pfsyncstats.pfsyncs_hdrops++;
924		goto done;
925	}
926
927	if (offset + sizeof(*ph) > m->m_len) {
928		if (m_pullup(m, offset + sizeof(*ph)) == NULL) {
929			V_pfsyncstats.pfsyncs_hdrops++;
930			return;
931		}
932		ip = mtod(m, struct ip *);
933	}
934	ph = (struct pfsync_header *)((char *)ip + offset);
935
936	/* verify the version */
937	if (ph->version != PFSYNC_VERSION) {
938		V_pfsyncstats.pfsyncs_badver++;
939		goto done;
940	}
941
942#if 0
943	if (pfsync_input_hmac(m, offset) != 0) {
944		/* XXX stats */
945		goto done;
946	}
947#endif
948
949	/* Cheaper to grab this now than having to mess with mbufs later */
950	pkt.ip = ip;
951	pkt.src = ip->ip_src;
952	pkt.flags = 0;
953
954#ifdef __FreeBSD__
955	if (!bcmp(&ph->pfcksum, &V_pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
956#else
957	if (!bcmp(&ph->pfcksum, &pf_status.pf_chksum, PF_MD5_DIGEST_LENGTH))
958#endif
959		pkt.flags |= PFSYNC_SI_CKSUM;
960
961	offset += sizeof(*ph);
962	for (;;) {
963		m_copydata(m, offset, sizeof(subh), (caddr_t)&subh);
964		offset += sizeof(subh);
965
966		if (subh.action >= PFSYNC_ACT_MAX) {
967			V_pfsyncstats.pfsyncs_badact++;
968			goto done;
969		}
970
971		rv = (*pfsync_acts[subh.action])(&pkt, m, offset,
972		    ntohs(subh.count));
973		if (rv == -1)
974			return;
975
976		offset += rv;
977	}
978
979done:
980	m_freem(m);
981}
982
983int
984pfsync_in_clr(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
985{
986	struct pfsync_clr *clr;
987	struct mbuf *mp;
988	int len = sizeof(*clr) * count;
989	int i, offp;
990
991	struct pf_state *st, *nexts;
992	struct pf_state_key *sk, *nextsk;
993	struct pf_state_item *si;
994	u_int32_t creatorid;
995	int s;
996
997	mp = m_pulldown(m, offset, len, &offp);
998	if (mp == NULL) {
999		V_pfsyncstats.pfsyncs_badlen++;
1000		return (-1);
1001	}
1002	clr = (struct pfsync_clr *)(mp->m_data + offp);
1003
1004	s = splsoftnet();
1005#ifdef __FreeBSD__
1006	PF_LOCK();
1007#endif
1008	for (i = 0; i < count; i++) {
1009		creatorid = clr[i].creatorid;
1010
1011		if (clr[i].ifname[0] == '\0') {
1012#ifdef __FreeBSD__
1013			for (st = RB_MIN(pf_state_tree_id, &V_tree_id);
1014			    st; st = nexts) {
1015				nexts = RB_NEXT(pf_state_tree_id, &V_tree_id, st);
1016#else
1017			for (st = RB_MIN(pf_state_tree_id, &tree_id);
1018			    st; st = nexts) {
1019				nexts = RB_NEXT(pf_state_tree_id, &tree_id, st);
1020#endif
1021				if (st->creatorid == creatorid) {
1022					SET(st->state_flags, PFSTATE_NOSYNC);
1023					pf_unlink_state(st);
1024				}
1025			}
1026		} else {
1027			if (pfi_kif_get(clr[i].ifname) == NULL)
1028				continue;
1029
1030			/* XXX correct? */
1031#ifdef __FreeBSD__
1032			for (sk = RB_MIN(pf_state_tree, &V_pf_statetbl);
1033#else
1034			for (sk = RB_MIN(pf_state_tree, &pf_statetbl);
1035#endif
1036			    sk; sk = nextsk) {
1037				nextsk = RB_NEXT(pf_state_tree,
1038#ifdef __FreeBSD__
1039				    &V_pf_statetbl, sk);
1040#else
1041				    &pf_statetbl, sk);
1042#endif
1043				TAILQ_FOREACH(si, &sk->states, entry) {
1044					if (si->s->creatorid == creatorid) {
1045						SET(si->s->state_flags,
1046						    PFSTATE_NOSYNC);
1047						pf_unlink_state(si->s);
1048					}
1049				}
1050			}
1051		}
1052	}
1053#ifdef __FreeBSD__
1054	PF_UNLOCK();
1055#endif
1056	splx(s);
1057
1058	return (len);
1059}
1060
1061int
1062pfsync_in_ins(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1063{
1064	struct mbuf *mp;
1065	struct pfsync_state *sa, *sp;
1066	int len = sizeof(*sp) * count;
1067	int i, offp;
1068
1069	int s;
1070
1071	mp = m_pulldown(m, offset, len, &offp);
1072	if (mp == NULL) {
1073		V_pfsyncstats.pfsyncs_badlen++;
1074		return (-1);
1075	}
1076	sa = (struct pfsync_state *)(mp->m_data + offp);
1077
1078	s = splsoftnet();
1079#ifdef __FreeBSD__
1080	PF_LOCK();
1081#endif
1082	for (i = 0; i < count; i++) {
1083		sp = &sa[i];
1084
1085		/* check for invalid values */
1086		if (sp->timeout >= PFTM_MAX ||
1087		    sp->src.state > PF_TCPS_PROXY_DST ||
1088		    sp->dst.state > PF_TCPS_PROXY_DST ||
1089		    sp->direction > PF_OUT ||
1090		    (sp->af != AF_INET && sp->af != AF_INET6)) {
1091#ifdef __FreeBSD__
1092			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1093#else
1094			if (pf_status.debug >= PF_DEBUG_MISC) {
1095#endif
1096				printf("pfsync_input: PFSYNC5_ACT_INS: "
1097				    "invalid value\n");
1098			}
1099			V_pfsyncstats.pfsyncs_badval++;
1100			continue;
1101		}
1102
1103		if (pfsync_state_import(sp, pkt->flags) == ENOMEM) {
1104			/* drop out, but process the rest of the actions */
1105			break;
1106		}
1107	}
1108#ifdef __FreeBSD__
1109	PF_UNLOCK();
1110#endif
1111	splx(s);
1112
1113	return (len);
1114}
1115
1116int
1117pfsync_in_iack(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1118{
1119	struct pfsync_ins_ack *ia, *iaa;
1120	struct pf_state_cmp id_key;
1121	struct pf_state *st;
1122
1123	struct mbuf *mp;
1124	int len = count * sizeof(*ia);
1125	int offp, i;
1126	int s;
1127
1128	mp = m_pulldown(m, offset, len, &offp);
1129	if (mp == NULL) {
1130		V_pfsyncstats.pfsyncs_badlen++;
1131		return (-1);
1132	}
1133	iaa = (struct pfsync_ins_ack *)(mp->m_data + offp);
1134
1135	s = splsoftnet();
1136#ifdef __FreeBSD__
1137	PF_LOCK();
1138#endif
1139	for (i = 0; i < count; i++) {
1140		ia = &iaa[i];
1141
1142		bcopy(&ia->id, &id_key.id, sizeof(id_key.id));
1143		id_key.creatorid = ia->creatorid;
1144
1145		st = pf_find_state_byid(&id_key);
1146		if (st == NULL)
1147			continue;
1148
1149		if (ISSET(st->state_flags, PFSTATE_ACK))
1150			pfsync_deferred(st, 0);
1151	}
1152#ifdef __FreeBSD__
1153	PF_UNLOCK();
1154#endif
1155	splx(s);
1156	/*
1157	 * XXX this is not yet implemented, but we know the size of the
1158	 * message so we can skip it.
1159	 */
1160
1161	return (count * sizeof(struct pfsync_ins_ack));
1162}
1163
1164int
1165pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
1166    struct pfsync_state_peer *dst)
1167{
1168	int sfail = 0;
1169
1170	/*
1171	 * The state should never go backwards except
1172	 * for syn-proxy states.  Neither should the
1173	 * sequence window slide backwards.
1174	 */
1175	if (st->src.state > src->state &&
1176	    (st->src.state < PF_TCPS_PROXY_SRC ||
1177	    src->state >= PF_TCPS_PROXY_SRC))
1178		sfail = 1;
1179	else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
1180		sfail = 3;
1181	else if (st->dst.state > dst->state) {
1182		/* There might still be useful
1183		 * information about the src state here,
1184		 * so import that part of the update,
1185		 * then "fail" so we send the updated
1186		 * state back to the peer who is missing
1187		 * our what we know. */
1188		pf_state_peer_ntoh(src, &st->src);
1189		/* XXX do anything with timeouts? */
1190		sfail = 7;
1191	} else if (st->dst.state >= TCPS_SYN_SENT &&
1192	    SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
1193		sfail = 4;
1194
1195	return (sfail);
1196}
1197
1198int
1199pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1200{
1201	struct pfsync_state *sa, *sp;
1202	struct pf_state_cmp id_key;
1203	struct pf_state_key *sk;
1204	struct pf_state *st;
1205	int sfail;
1206
1207	struct mbuf *mp;
1208	int len = count * sizeof(*sp);
1209	int offp, i;
1210	int s;
1211
1212	mp = m_pulldown(m, offset, len, &offp);
1213	if (mp == NULL) {
1214		V_pfsyncstats.pfsyncs_badlen++;
1215		return (-1);
1216	}
1217	sa = (struct pfsync_state *)(mp->m_data + offp);
1218
1219	s = splsoftnet();
1220#ifdef __FreeBSD__
1221	PF_LOCK();
1222#endif
1223	for (i = 0; i < count; i++) {
1224		sp = &sa[i];
1225
1226		/* check for invalid values */
1227		if (sp->timeout >= PFTM_MAX ||
1228		    sp->src.state > PF_TCPS_PROXY_DST ||
1229		    sp->dst.state > PF_TCPS_PROXY_DST) {
1230#ifdef __FreeBSD__
1231			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1232#else
1233			if (pf_status.debug >= PF_DEBUG_MISC) {
1234#endif
1235				printf("pfsync_input: PFSYNC_ACT_UPD: "
1236				    "invalid value\n");
1237			}
1238			V_pfsyncstats.pfsyncs_badval++;
1239			continue;
1240		}
1241
1242		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
1243		id_key.creatorid = sp->creatorid;
1244
1245		st = pf_find_state_byid(&id_key);
1246		if (st == NULL) {
1247			/* insert the update */
1248			if (pfsync_state_import(sp, 0))
1249				V_pfsyncstats.pfsyncs_badstate++;
1250			continue;
1251		}
1252
1253		if (ISSET(st->state_flags, PFSTATE_ACK))
1254			pfsync_deferred(st, 1);
1255
1256		sk = st->key[PF_SK_WIRE];	/* XXX right one? */
1257		sfail = 0;
1258		if (sk->proto == IPPROTO_TCP)
1259			sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
1260		else {
1261			/*
1262			 * Non-TCP protocol state machine always go
1263			 * forwards
1264			 */
1265			if (st->src.state > sp->src.state)
1266				sfail = 5;
1267			else if (st->dst.state > sp->dst.state)
1268				sfail = 6;
1269		}
1270
1271		if (sfail) {
1272#ifdef __FreeBSD__
1273			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1274#else
1275			if (pf_status.debug >= PF_DEBUG_MISC) {
1276#endif
1277				printf("pfsync: %s stale update (%d)"
1278				    " id: %016llx creatorid: %08x\n",
1279				    (sfail < 7 ?  "ignoring" : "partial"),
1280				    sfail, betoh64(st->id),
1281				    ntohl(st->creatorid));
1282			}
1283			V_pfsyncstats.pfsyncs_stale++;
1284
1285			pfsync_update_state(st);
1286#ifdef __FreeBSD__
1287			pfsync_sendout();
1288#else
1289			schednetisr(NETISR_PFSYNC);
1290#endif
1291			continue;
1292		}
1293		pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
1294		pf_state_peer_ntoh(&sp->src, &st->src);
1295		pf_state_peer_ntoh(&sp->dst, &st->dst);
1296		st->expire = ntohl(sp->expire) + time_second;
1297		st->timeout = sp->timeout;
1298		st->pfsync_time = time_uptime;
1299	}
1300#ifdef __FreeBSD__
1301	PF_UNLOCK();
1302#endif
1303	splx(s);
1304
1305	return (len);
1306}
1307
1308int
1309pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1310{
1311	struct pfsync_upd_c *ua, *up;
1312	struct pf_state_key *sk;
1313	struct pf_state_cmp id_key;
1314	struct pf_state *st;
1315
1316	int len = count * sizeof(*up);
1317	int sfail;
1318
1319	struct mbuf *mp;
1320	int offp, i;
1321	int s;
1322
1323	mp = m_pulldown(m, offset, len, &offp);
1324	if (mp == NULL) {
1325		V_pfsyncstats.pfsyncs_badlen++;
1326		return (-1);
1327	}
1328	ua = (struct pfsync_upd_c *)(mp->m_data + offp);
1329
1330	s = splsoftnet();
1331#ifdef __FreeBSD__
1332	PF_LOCK();
1333#endif
1334	for (i = 0; i < count; i++) {
1335		up = &ua[i];
1336
1337		/* check for invalid values */
1338		if (up->timeout >= PFTM_MAX ||
1339		    up->src.state > PF_TCPS_PROXY_DST ||
1340		    up->dst.state > PF_TCPS_PROXY_DST) {
1341#ifdef __FreeBSD__
1342			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1343#else
1344			if (pf_status.debug >= PF_DEBUG_MISC) {
1345#endif
1346				printf("pfsync_input: "
1347				    "PFSYNC_ACT_UPD_C: "
1348				    "invalid value\n");
1349			}
1350			V_pfsyncstats.pfsyncs_badval++;
1351			continue;
1352		}
1353
1354		bcopy(&up->id, &id_key.id, sizeof(id_key.id));
1355		id_key.creatorid = up->creatorid;
1356
1357		st = pf_find_state_byid(&id_key);
1358		if (st == NULL) {
1359			/* We don't have this state. Ask for it. */
1360			pfsync_request_update(id_key.creatorid, id_key.id);
1361			continue;
1362		}
1363
1364		if (ISSET(st->state_flags, PFSTATE_ACK))
1365			pfsync_deferred(st, 1);
1366
1367		sk = st->key[PF_SK_WIRE]; /* XXX right one? */
1368		sfail = 0;
1369		if (sk->proto == IPPROTO_TCP)
1370			sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
1371		else {
1372			/*
1373			 * Non-TCP protocol state machine always go forwards
1374			 */
1375			if (st->src.state > up->src.state)
1376				sfail = 5;
1377			else if (st->dst.state > up->dst.state)
1378				sfail = 6;
1379		}
1380
1381		if (sfail) {
1382#ifdef __FreeBSD__
1383			if (V_pf_status.debug >= PF_DEBUG_MISC) {
1384#else
1385			if (pf_status.debug >= PF_DEBUG_MISC) {
1386#endif
1387				printf("pfsync: ignoring stale update "
1388				    "(%d) id: %016llx "
1389				    "creatorid: %08x\n", sfail,
1390				    betoh64(st->id),
1391				    ntohl(st->creatorid));
1392			}
1393			V_pfsyncstats.pfsyncs_stale++;
1394
1395			pfsync_update_state(st);
1396#ifdef __FreeBSD__
1397			pfsync_sendout();
1398#else
1399			schednetisr(NETISR_PFSYNC);
1400#endif
1401			continue;
1402		}
1403		pfsync_alloc_scrub_memory(&up->dst, &st->dst);
1404		pf_state_peer_ntoh(&up->src, &st->src);
1405		pf_state_peer_ntoh(&up->dst, &st->dst);
1406		st->expire = ntohl(up->expire) + time_second;
1407		st->timeout = up->timeout;
1408		st->pfsync_time = time_uptime;
1409	}
1410#ifdef __FreeBSD__
1411	PF_UNLOCK();
1412#endif
1413	splx(s);
1414
1415	return (len);
1416}
1417
1418int
1419pfsync_in_ureq(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1420{
1421	struct pfsync_upd_req *ur, *ura;
1422	struct mbuf *mp;
1423	int len = count * sizeof(*ur);
1424	int i, offp;
1425
1426	struct pf_state_cmp id_key;
1427	struct pf_state *st;
1428
1429	mp = m_pulldown(m, offset, len, &offp);
1430	if (mp == NULL) {
1431		V_pfsyncstats.pfsyncs_badlen++;
1432		return (-1);
1433	}
1434	ura = (struct pfsync_upd_req *)(mp->m_data + offp);
1435
1436	for (i = 0; i < count; i++) {
1437		ur = &ura[i];
1438
1439		bcopy(&ur->id, &id_key.id, sizeof(id_key.id));
1440		id_key.creatorid = ur->creatorid;
1441
1442		if (id_key.id == 0 && id_key.creatorid == 0)
1443			pfsync_bulk_start();
1444		else {
1445			st = pf_find_state_byid(&id_key);
1446			if (st == NULL) {
1447				V_pfsyncstats.pfsyncs_badstate++;
1448				continue;
1449			}
1450			if (ISSET(st->state_flags, PFSTATE_NOSYNC))
1451				continue;
1452
1453			PF_LOCK();
1454			pfsync_update_state_req(st);
1455			PF_UNLOCK();
1456		}
1457	}
1458
1459	return (len);
1460}
1461
1462int
1463pfsync_in_del(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1464{
1465	struct mbuf *mp;
1466	struct pfsync_state *sa, *sp;
1467	struct pf_state_cmp id_key;
1468	struct pf_state *st;
1469	int len = count * sizeof(*sp);
1470	int offp, i;
1471	int s;
1472
1473	mp = m_pulldown(m, offset, len, &offp);
1474	if (mp == NULL) {
1475		V_pfsyncstats.pfsyncs_badlen++;
1476		return (-1);
1477	}
1478	sa = (struct pfsync_state *)(mp->m_data + offp);
1479
1480	s = splsoftnet();
1481#ifdef __FreeBSD__
1482	PF_LOCK();
1483#endif
1484	for (i = 0; i < count; i++) {
1485		sp = &sa[i];
1486
1487		bcopy(sp->id, &id_key.id, sizeof(id_key.id));
1488		id_key.creatorid = sp->creatorid;
1489
1490		st = pf_find_state_byid(&id_key);
1491		if (st == NULL) {
1492			V_pfsyncstats.pfsyncs_badstate++;
1493			continue;
1494		}
1495		SET(st->state_flags, PFSTATE_NOSYNC);
1496		pf_unlink_state(st);
1497	}
1498#ifdef __FreeBSD__
1499	PF_UNLOCK();
1500#endif
1501	splx(s);
1502
1503	return (len);
1504}
1505
1506int
1507pfsync_in_del_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1508{
1509	struct mbuf *mp;
1510	struct pfsync_del_c *sa, *sp;
1511	struct pf_state_cmp id_key;
1512	struct pf_state *st;
1513	int len = count * sizeof(*sp);
1514	int offp, i;
1515	int s;
1516
1517	mp = m_pulldown(m, offset, len, &offp);
1518	if (mp == NULL) {
1519		V_pfsyncstats.pfsyncs_badlen++;
1520		return (-1);
1521	}
1522	sa = (struct pfsync_del_c *)(mp->m_data + offp);
1523
1524	s = splsoftnet();
1525#ifdef __FreeBSD__
1526	PF_LOCK();
1527#endif
1528	for (i = 0; i < count; i++) {
1529		sp = &sa[i];
1530
1531		bcopy(&sp->id, &id_key.id, sizeof(id_key.id));
1532		id_key.creatorid = sp->creatorid;
1533
1534		st = pf_find_state_byid(&id_key);
1535		if (st == NULL) {
1536			V_pfsyncstats.pfsyncs_badstate++;
1537			continue;
1538		}
1539
1540		SET(st->state_flags, PFSTATE_NOSYNC);
1541		pf_unlink_state(st);
1542	}
1543#ifdef __FreeBSD__
1544	PF_UNLOCK();
1545#endif
1546	splx(s);
1547
1548	return (len);
1549}
1550
1551int
1552pfsync_in_bus(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1553{
1554#ifdef __FreeBSD__
1555	struct pfsync_softc *sc = V_pfsyncif;
1556#else
1557	struct pfsync_softc *sc = pfsyncif;
1558#endif
1559	struct pfsync_bus *bus;
1560	struct mbuf *mp;
1561	int len = count * sizeof(*bus);
1562	int offp;
1563
1564	/* If we're not waiting for a bulk update, who cares. */
1565	if (sc->sc_ureq_sent == 0)
1566		return (len);
1567
1568	mp = m_pulldown(m, offset, len, &offp);
1569	if (mp == NULL) {
1570		V_pfsyncstats.pfsyncs_badlen++;
1571		return (-1);
1572	}
1573	bus = (struct pfsync_bus *)(mp->m_data + offp);
1574
1575	switch (bus->status) {
1576	case PFSYNC_BUS_START:
1577#ifdef __FreeBSD__
1578		callout_reset(&sc->sc_bulkfail_tmo, 4 * hz +
1579		    V_pf_pool_limits[PF_LIMIT_STATES].limit /
1580		    ((sc->sc_ifp->if_mtu - PFSYNC_MINPKT) /
1581		    sizeof(struct pfsync_state)),
1582		    pfsync_bulk_fail, V_pfsyncif);
1583#else
1584		timeout_add(&sc->sc_bulkfail_tmo, 4 * hz +
1585		    pf_pool_limits[PF_LIMIT_STATES].limit /
1586		    ((sc->sc_if.if_mtu - PFSYNC_MINPKT) /
1587		    sizeof(struct pfsync_state)));
1588#endif
1589#ifdef __FreeBSD__
1590		if (V_pf_status.debug >= PF_DEBUG_MISC)
1591#else
1592		if (pf_status.debug >= PF_DEBUG_MISC)
1593#endif
1594			printf("pfsync: received bulk update start\n");
1595		break;
1596
1597	case PFSYNC_BUS_END:
1598		if (time_uptime - ntohl(bus->endtime) >=
1599		    sc->sc_ureq_sent) {
1600			/* that's it, we're happy */
1601			sc->sc_ureq_sent = 0;
1602			sc->sc_bulk_tries = 0;
1603			timeout_del(&sc->sc_bulkfail_tmo);
1604#ifdef __FreeBSD__
1605			if (!sc->pfsync_sync_ok && carp_demote_adj_p)
1606				(*carp_demote_adj_p)(-V_pfsync_carp_adj,
1607				    "pfsync bulk done");
1608			sc->pfsync_sync_ok = 1;
1609#else
1610#if NCARP > 0
1611			if (!pfsync_sync_ok)
1612				carp_group_demote_adj(&sc->sc_if, -1);
1613#endif
1614			pfsync_sync_ok = 1;
1615#endif
1616#ifdef __FreeBSD__
1617			if (V_pf_status.debug >= PF_DEBUG_MISC)
1618#else
1619			if (pf_status.debug >= PF_DEBUG_MISC)
1620#endif
1621				printf("pfsync: received valid "
1622				    "bulk update end\n");
1623		} else {
1624#ifdef __FreeBSD__
1625			if (V_pf_status.debug >= PF_DEBUG_MISC)
1626#else
1627			if (pf_status.debug >= PF_DEBUG_MISC)
1628#endif
1629				printf("pfsync: received invalid "
1630				    "bulk update end: bad timestamp\n");
1631		}
1632		break;
1633	}
1634
1635	return (len);
1636}
1637
1638int
1639pfsync_in_tdb(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1640{
1641	int len = count * sizeof(struct pfsync_tdb);
1642
1643#if defined(IPSEC)
1644	struct pfsync_tdb *tp;
1645	struct mbuf *mp;
1646	int offp;
1647	int i;
1648	int s;
1649
1650	mp = m_pulldown(m, offset, len, &offp);
1651	if (mp == NULL) {
1652		V_pfsyncstats.pfsyncs_badlen++;
1653		return (-1);
1654	}
1655	tp = (struct pfsync_tdb *)(mp->m_data + offp);
1656
1657	s = splsoftnet();
1658#ifdef __FreeBSD__
1659	PF_LOCK();
1660#endif
1661	for (i = 0; i < count; i++)
1662		pfsync_update_net_tdb(&tp[i]);
1663#ifdef __FreeBSD__
1664	PF_UNLOCK();
1665#endif
1666	splx(s);
1667#endif
1668
1669	return (len);
1670}
1671
1672#if defined(IPSEC)
1673/* Update an in-kernel tdb. Silently fail if no tdb is found. */
1674void
1675pfsync_update_net_tdb(struct pfsync_tdb *pt)
1676{
1677	struct tdb		*tdb;
1678	int			 s;
1679
1680	/* check for invalid values */
1681	if (ntohl(pt->spi) <= SPI_RESERVED_MAX ||
1682	    (pt->dst.sa.sa_family != AF_INET &&
1683	     pt->dst.sa.sa_family != AF_INET6))
1684		goto bad;
1685
1686	s = spltdb();
1687	tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
1688	if (tdb) {
1689		pt->rpl = ntohl(pt->rpl);
1690		pt->cur_bytes = betoh64(pt->cur_bytes);
1691
1692		/* Neither replay nor byte counter should ever decrease. */
1693		if (pt->rpl < tdb->tdb_rpl ||
1694		    pt->cur_bytes < tdb->tdb_cur_bytes) {
1695			splx(s);
1696			goto bad;
1697		}
1698
1699		tdb->tdb_rpl = pt->rpl;
1700		tdb->tdb_cur_bytes = pt->cur_bytes;
1701	}
1702	splx(s);
1703	return;
1704
1705bad:
1706#ifdef __FreeBSD__
1707	if (V_pf_status.debug >= PF_DEBUG_MISC)
1708#else
1709	if (pf_status.debug >= PF_DEBUG_MISC)
1710#endif
1711		printf("pfsync_insert: PFSYNC_ACT_TDB_UPD: "
1712		    "invalid value\n");
1713	V_pfsyncstats.pfsyncs_badstate++;
1714	return;
1715}
1716#endif
1717
1718
1719int
1720pfsync_in_eof(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1721{
1722	/* check if we are at the right place in the packet */
1723	if (offset != m->m_pkthdr.len - sizeof(struct pfsync_eof))
1724		V_pfsyncstats.pfsyncs_badact++;
1725
1726	/* we're done. free and let the caller return */
1727	m_freem(m);
1728	return (-1);
1729}
1730
1731int
1732pfsync_in_error(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
1733{
1734	V_pfsyncstats.pfsyncs_badact++;
1735
1736	m_freem(m);
1737	return (-1);
1738}
1739
1740int
1741pfsyncoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
1742#ifdef __FreeBSD__
1743	struct route *rt)
1744#else
1745	struct rtentry *rt)
1746#endif
1747{
1748	m_freem(m);
1749	return (0);
1750}
1751
1752/* ARGSUSED */
1753int
1754pfsyncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1755{
1756#ifndef __FreeBSD__
1757	struct proc *p = curproc;
1758#endif
1759	struct pfsync_softc *sc = ifp->if_softc;
1760	struct ifreq *ifr = (struct ifreq *)data;
1761	struct ip_moptions *imo = &sc->sc_imo;
1762	struct pfsyncreq pfsyncr;
1763	struct ifnet    *sifp;
1764	struct ip *ip;
1765	int s, error;
1766
1767	switch (cmd) {
1768#if 0
1769	case SIOCSIFADDR:
1770	case SIOCAIFADDR:
1771	case SIOCSIFDSTADDR:
1772#endif
1773	case SIOCSIFFLAGS:
1774#ifdef __FreeBSD__
1775		if (ifp->if_flags & IFF_UP)
1776			ifp->if_drv_flags |= IFF_DRV_RUNNING;
1777		else
1778			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1779#else
1780		if (ifp->if_flags & IFF_UP)
1781			ifp->if_flags |= IFF_RUNNING;
1782		else
1783			ifp->if_flags &= ~IFF_RUNNING;
1784#endif
1785		break;
1786	case SIOCSIFMTU:
1787		if (!sc->sc_sync_if ||
1788		    ifr->ifr_mtu <= PFSYNC_MINPKT ||
1789		    ifr->ifr_mtu > sc->sc_sync_if->if_mtu)
1790			return (EINVAL);
1791		if (ifr->ifr_mtu < ifp->if_mtu) {
1792			s = splnet();
1793#ifdef __FreeBSD__
1794			PF_LOCK();
1795#endif
1796			pfsync_sendout();
1797#ifdef __FreeBSD__
1798			PF_UNLOCK();
1799#endif
1800			splx(s);
1801		}
1802		ifp->if_mtu = ifr->ifr_mtu;
1803		break;
1804	case SIOCGETPFSYNC:
1805		bzero(&pfsyncr, sizeof(pfsyncr));
1806		if (sc->sc_sync_if) {
1807			strlcpy(pfsyncr.pfsyncr_syncdev,
1808			    sc->sc_sync_if->if_xname, IFNAMSIZ);
1809		}
1810		pfsyncr.pfsyncr_syncpeer = sc->sc_sync_peer;
1811		pfsyncr.pfsyncr_maxupdates = sc->sc_maxupdates;
1812		return (copyout(&pfsyncr, ifr->ifr_data, sizeof(pfsyncr)));
1813
1814	case SIOCSETPFSYNC:
1815#ifdef __FreeBSD__
1816		if ((error = priv_check(curthread, PRIV_NETINET_PF)) != 0)
1817#else
1818		if ((error = suser(p, p->p_acflag)) != 0)
1819#endif
1820			return (error);
1821		if ((error = copyin(ifr->ifr_data, &pfsyncr, sizeof(pfsyncr))))
1822			return (error);
1823
1824#ifdef __FreeBSD__
1825		PF_LOCK();
1826#endif
1827		if (pfsyncr.pfsyncr_syncpeer.s_addr == 0)
1828#ifdef __FreeBSD__
1829			sc->sc_sync_peer.s_addr = htonl(INADDR_PFSYNC_GROUP);
1830#else
1831			sc->sc_sync_peer.s_addr = INADDR_PFSYNC_GROUP;
1832#endif
1833		else
1834			sc->sc_sync_peer.s_addr =
1835			    pfsyncr.pfsyncr_syncpeer.s_addr;
1836
1837		if (pfsyncr.pfsyncr_maxupdates > 255)
1838#ifdef __FreeBSD__
1839		{
1840			PF_UNLOCK();
1841#endif
1842			return (EINVAL);
1843#ifdef __FreeBSD__
1844		}
1845#endif
1846		sc->sc_maxupdates = pfsyncr.pfsyncr_maxupdates;
1847
1848		if (pfsyncr.pfsyncr_syncdev[0] == 0) {
1849			sc->sc_sync_if = NULL;
1850#ifdef __FreeBSD__
1851			PF_UNLOCK();
1852			if (imo->imo_membership)
1853				pfsync_multicast_cleanup(sc);
1854#else
1855			if (imo->imo_num_memberships > 0) {
1856				in_delmulti(imo->imo_membership[
1857				    --imo->imo_num_memberships]);
1858				imo->imo_multicast_ifp = NULL;
1859			}
1860#endif
1861			break;
1862		}
1863
1864#ifdef __FreeBSD__
1865		PF_UNLOCK();
1866#endif
1867		if ((sifp = ifunit(pfsyncr.pfsyncr_syncdev)) == NULL)
1868			return (EINVAL);
1869
1870#ifdef __FreeBSD__
1871		PF_LOCK();
1872#endif
1873		s = splnet();
1874#ifdef __FreeBSD__
1875		if (sifp->if_mtu < sc->sc_ifp->if_mtu ||
1876#else
1877		if (sifp->if_mtu < sc->sc_if.if_mtu ||
1878#endif
1879		    (sc->sc_sync_if != NULL &&
1880		    sifp->if_mtu < sc->sc_sync_if->if_mtu) ||
1881		    sifp->if_mtu < MCLBYTES - sizeof(struct ip))
1882			pfsync_sendout();
1883		sc->sc_sync_if = sifp;
1884
1885#ifdef __FreeBSD__
1886		if (imo->imo_membership) {
1887			PF_UNLOCK();
1888			pfsync_multicast_cleanup(sc);
1889			PF_LOCK();
1890		}
1891#else
1892		if (imo->imo_num_memberships > 0) {
1893			in_delmulti(imo->imo_membership[--imo->imo_num_memberships]);
1894			imo->imo_multicast_ifp = NULL;
1895		}
1896#endif
1897
1898#ifdef __FreeBSD__
1899		if (sc->sc_sync_if &&
1900		    sc->sc_sync_peer.s_addr == htonl(INADDR_PFSYNC_GROUP)) {
1901			PF_UNLOCK();
1902			error = pfsync_multicast_setup(sc);
1903			if (error)
1904				return (error);
1905			PF_LOCK();
1906		}
1907#else
1908		if (sc->sc_sync_if &&
1909		    sc->sc_sync_peer.s_addr == INADDR_PFSYNC_GROUP) {
1910			struct in_addr addr;
1911
1912			if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
1913				sc->sc_sync_if = NULL;
1914				splx(s);
1915				return (EADDRNOTAVAIL);
1916			}
1917
1918			addr.s_addr = INADDR_PFSYNC_GROUP;
1919
1920			if ((imo->imo_membership[0] =
1921			    in_addmulti(&addr, sc->sc_sync_if)) == NULL) {
1922				sc->sc_sync_if = NULL;
1923				splx(s);
1924				return (ENOBUFS);
1925			}
1926			imo->imo_num_memberships++;
1927			imo->imo_multicast_ifp = sc->sc_sync_if;
1928			imo->imo_multicast_ttl = PFSYNC_DFLTTL;
1929			imo->imo_multicast_loop = 0;
1930		}
1931#endif	/* !__FreeBSD__ */
1932
1933		ip = &sc->sc_template;
1934		bzero(ip, sizeof(*ip));
1935		ip->ip_v = IPVERSION;
1936		ip->ip_hl = sizeof(sc->sc_template) >> 2;
1937		ip->ip_tos = IPTOS_LOWDELAY;
1938		/* len and id are set later */
1939#ifdef __FreeBSD__
1940		ip->ip_off = IP_DF;
1941#else
1942		ip->ip_off = htons(IP_DF);
1943#endif
1944		ip->ip_ttl = PFSYNC_DFLTTL;
1945		ip->ip_p = IPPROTO_PFSYNC;
1946		ip->ip_src.s_addr = INADDR_ANY;
1947		ip->ip_dst.s_addr = sc->sc_sync_peer.s_addr;
1948
1949		if (sc->sc_sync_if) {
1950			/* Request a full state table update. */
1951			sc->sc_ureq_sent = time_uptime;
1952#ifdef __FreeBSD__
1953			if (sc->pfsync_sync_ok && carp_demote_adj_p)
1954				(*carp_demote_adj_p)(V_pfsync_carp_adj,
1955				    "pfsync bulk start");
1956			sc->pfsync_sync_ok = 0;
1957#else
1958#if NCARP > 0
1959			if (pfsync_sync_ok)
1960				carp_group_demote_adj(&sc->sc_if, 1);
1961#endif
1962			pfsync_sync_ok = 0;
1963#endif
1964#ifdef __FreeBSD__
1965			if (V_pf_status.debug >= PF_DEBUG_MISC)
1966#else
1967			if (pf_status.debug >= PF_DEBUG_MISC)
1968#endif
1969				printf("pfsync: requesting bulk update\n");
1970#ifdef __FreeBSD__
1971			callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
1972			    pfsync_bulk_fail, V_pfsyncif);
1973#else
1974			timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
1975#endif
1976			pfsync_request_update(0, 0);
1977		}
1978#ifdef __FreeBSD__
1979		PF_UNLOCK();
1980#endif
1981		splx(s);
1982
1983		break;
1984
1985	default:
1986		return (ENOTTY);
1987	}
1988
1989	return (0);
1990}
1991
1992int
1993pfsync_out_state(struct pf_state *st, struct mbuf *m, int offset)
1994{
1995	struct pfsync_state *sp = (struct pfsync_state *)(m->m_data + offset);
1996
1997	pfsync_state_export(sp, st);
1998
1999	return (sizeof(*sp));
2000}
2001
2002int
2003pfsync_out_iack(struct pf_state *st, struct mbuf *m, int offset)
2004{
2005	struct pfsync_ins_ack *iack =
2006	    (struct pfsync_ins_ack *)(m->m_data + offset);
2007
2008	iack->id = st->id;
2009	iack->creatorid = st->creatorid;
2010
2011	return (sizeof(*iack));
2012}
2013
2014int
2015pfsync_out_upd_c(struct pf_state *st, struct mbuf *m, int offset)
2016{
2017	struct pfsync_upd_c *up = (struct pfsync_upd_c *)(m->m_data + offset);
2018
2019	up->id = st->id;
2020	pf_state_peer_hton(&st->src, &up->src);
2021	pf_state_peer_hton(&st->dst, &up->dst);
2022	up->creatorid = st->creatorid;
2023
2024	up->expire = pf_state_expires(st);
2025	if (up->expire <= time_second)
2026		up->expire = htonl(0);
2027	else
2028		up->expire = htonl(up->expire - time_second);
2029	up->timeout = st->timeout;
2030
2031	bzero(up->_pad, sizeof(up->_pad)); /* XXX */
2032
2033	return (sizeof(*up));
2034}
2035
2036int
2037pfsync_out_del(struct pf_state *st, struct mbuf *m, int offset)
2038{
2039	struct pfsync_del_c *dp = (struct pfsync_del_c *)(m->m_data + offset);
2040
2041	dp->id = st->id;
2042	dp->creatorid = st->creatorid;
2043
2044	SET(st->state_flags, PFSTATE_NOSYNC);
2045
2046	return (sizeof(*dp));
2047}
2048
2049void
2050pfsync_drop(struct pfsync_softc *sc)
2051{
2052	struct pf_state *st;
2053	struct pfsync_upd_req_item *ur;
2054#ifdef notyet
2055	struct tdb *t;
2056#endif
2057	int q;
2058
2059	for (q = 0; q < PFSYNC_S_COUNT; q++) {
2060		if (TAILQ_EMPTY(&sc->sc_qs[q]))
2061			continue;
2062
2063		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
2064#ifdef PFSYNC_DEBUG
2065#ifdef __FreeBSD__
2066			KASSERT(st->sync_state == q,
2067				("%s: st->sync_state == q",
2068					__FUNCTION__));
2069#else
2070			KASSERT(st->sync_state == q);
2071#endif
2072#endif
2073			st->sync_state = PFSYNC_S_NONE;
2074		}
2075		TAILQ_INIT(&sc->sc_qs[q]);
2076	}
2077
2078	while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
2079		TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
2080		pool_put(&sc->sc_pool, ur);
2081	}
2082
2083	sc->sc_plus = NULL;
2084
2085#ifdef notyet
2086	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
2087		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry)
2088			CLR(t->tdb_flags, TDBF_PFSYNC);
2089
2090		TAILQ_INIT(&sc->sc_tdb_q);
2091	}
2092#endif
2093
2094	sc->sc_len = PFSYNC_MINPKT;
2095}
2096
2097void
2098pfsync_sendout(void)
2099{
2100#ifdef __FreeBSD__
2101	struct pfsync_softc *sc = V_pfsyncif;
2102#else
2103	struct pfsync_softc *sc = pfsyncif;
2104#endif
2105#if NBPFILTER > 0
2106#ifdef __FreeBSD__
2107	struct ifnet *ifp = sc->sc_ifp;
2108#else
2109	struct ifnet *ifp = &sc->sc_if;
2110#endif
2111#endif
2112	struct mbuf *m;
2113	struct ip *ip;
2114	struct pfsync_header *ph;
2115	struct pfsync_subheader *subh;
2116	struct pf_state *st;
2117	struct pfsync_upd_req_item *ur;
2118#ifdef notyet
2119	struct tdb *t;
2120#endif
2121#ifdef __FreeBSD__
2122	size_t pktlen;
2123	int dummy_error;
2124#endif
2125	int offset;
2126	int q, count = 0;
2127
2128#ifdef __FreeBSD__
2129	PF_LOCK_ASSERT();
2130#else
2131	splassert(IPL_NET);
2132#endif
2133
2134	if (sc == NULL || sc->sc_len == PFSYNC_MINPKT)
2135		return;
2136
2137#if NBPFILTER > 0
2138	if (ifp->if_bpf == NULL && sc->sc_sync_if == NULL) {
2139#else
2140	if (sc->sc_sync_if == NULL) {
2141#endif
2142		pfsync_drop(sc);
2143		return;
2144	}
2145
2146	MGETHDR(m, M_DONTWAIT, MT_DATA);
2147	if (m == NULL) {
2148#ifdef __FreeBSD__
2149		sc->sc_ifp->if_oerrors++;
2150#else
2151		sc->sc_if.if_oerrors++;
2152#endif
2153		V_pfsyncstats.pfsyncs_onomem++;
2154		pfsync_drop(sc);
2155		return;
2156	}
2157
2158#ifdef __FreeBSD__
2159	pktlen = max_linkhdr + sc->sc_len;
2160	if (pktlen > MHLEN) {
2161		/* Find the right pool to allocate from. */
2162		/* XXX: This is ugly. */
2163		m_cljget(m, M_DONTWAIT, pktlen <= MCLBYTES ? MCLBYTES :
2164#if MJUMPAGESIZE != MCLBYTES
2165			pktlen <= MJUMPAGESIZE ? MJUMPAGESIZE :
2166#endif
2167			pktlen <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
2168#else
2169	if (max_linkhdr + sc->sc_len > MHLEN) {
2170		MCLGETI(m, M_DONTWAIT, NULL, max_linkhdr + sc->sc_len);
2171#endif
2172		if (!ISSET(m->m_flags, M_EXT)) {
2173			m_free(m);
2174#ifdef __FreeBSD__
2175			sc->sc_ifp->if_oerrors++;
2176#else
2177			sc->sc_if.if_oerrors++;
2178#endif
2179			V_pfsyncstats.pfsyncs_onomem++;
2180			pfsync_drop(sc);
2181			return;
2182		}
2183	}
2184	m->m_data += max_linkhdr;
2185	m->m_len = m->m_pkthdr.len = sc->sc_len;
2186
2187	/* build the ip header */
2188	ip = (struct ip *)m->m_data;
2189	bcopy(&sc->sc_template, ip, sizeof(*ip));
2190	offset = sizeof(*ip);
2191
2192#ifdef __FreeBSD__
2193	ip->ip_len = m->m_pkthdr.len;
2194#else
2195	ip->ip_len = htons(m->m_pkthdr.len);
2196#endif
2197	ip->ip_id = htons(ip_randomid());
2198
2199	/* build the pfsync header */
2200	ph = (struct pfsync_header *)(m->m_data + offset);
2201	bzero(ph, sizeof(*ph));
2202	offset += sizeof(*ph);
2203
2204	ph->version = PFSYNC_VERSION;
2205	ph->len = htons(sc->sc_len - sizeof(*ip));
2206#ifdef __FreeBSD__
2207	bcopy(V_pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
2208#else
2209	bcopy(pf_status.pf_chksum, ph->pfcksum, PF_MD5_DIGEST_LENGTH);
2210#endif
2211
2212	/* walk the queues */
2213	for (q = 0; q < PFSYNC_S_COUNT; q++) {
2214		if (TAILQ_EMPTY(&sc->sc_qs[q]))
2215			continue;
2216
2217		subh = (struct pfsync_subheader *)(m->m_data + offset);
2218		offset += sizeof(*subh);
2219
2220		count = 0;
2221		TAILQ_FOREACH(st, &sc->sc_qs[q], sync_list) {
2222#ifdef PFSYNC_DEBUG
2223#ifdef __FreeBSD__
2224			KASSERT(st->sync_state == q,
2225				("%s: st->sync_state == q",
2226					__FUNCTION__));
2227#else
2228			KASSERT(st->sync_state == q);
2229#endif
2230#endif
2231
2232			offset += pfsync_qs[q].write(st, m, offset);
2233			st->sync_state = PFSYNC_S_NONE;
2234			count++;
2235		}
2236		TAILQ_INIT(&sc->sc_qs[q]);
2237
2238		bzero(subh, sizeof(*subh));
2239		subh->action = pfsync_qs[q].action;
2240		subh->count = htons(count);
2241	}
2242
2243	if (!TAILQ_EMPTY(&sc->sc_upd_req_list)) {
2244		subh = (struct pfsync_subheader *)(m->m_data + offset);
2245		offset += sizeof(*subh);
2246
2247		count = 0;
2248		while ((ur = TAILQ_FIRST(&sc->sc_upd_req_list)) != NULL) {
2249			TAILQ_REMOVE(&sc->sc_upd_req_list, ur, ur_entry);
2250
2251			bcopy(&ur->ur_msg, m->m_data + offset,
2252			    sizeof(ur->ur_msg));
2253			offset += sizeof(ur->ur_msg);
2254
2255			pool_put(&sc->sc_pool, ur);
2256
2257			count++;
2258		}
2259
2260		bzero(subh, sizeof(*subh));
2261		subh->action = PFSYNC_ACT_UPD_REQ;
2262		subh->count = htons(count);
2263	}
2264
2265	/* has someone built a custom region for us to add? */
2266	if (sc->sc_plus != NULL) {
2267		bcopy(sc->sc_plus, m->m_data + offset, sc->sc_pluslen);
2268		offset += sc->sc_pluslen;
2269
2270		sc->sc_plus = NULL;
2271	}
2272
2273#ifdef notyet
2274	if (!TAILQ_EMPTY(&sc->sc_tdb_q)) {
2275		subh = (struct pfsync_subheader *)(m->m_data + offset);
2276		offset += sizeof(*subh);
2277
2278		count = 0;
2279		TAILQ_FOREACH(t, &sc->sc_tdb_q, tdb_sync_entry) {
2280			offset += pfsync_out_tdb(t, m, offset);
2281			CLR(t->tdb_flags, TDBF_PFSYNC);
2282
2283			count++;
2284		}
2285		TAILQ_INIT(&sc->sc_tdb_q);
2286
2287		bzero(subh, sizeof(*subh));
2288		subh->action = PFSYNC_ACT_TDB;
2289		subh->count = htons(count);
2290	}
2291#endif
2292
2293	subh = (struct pfsync_subheader *)(m->m_data + offset);
2294	offset += sizeof(*subh);
2295
2296	bzero(subh, sizeof(*subh));
2297	subh->action = PFSYNC_ACT_EOF;
2298	subh->count = htons(1);
2299
2300	/* XXX write checksum in EOF here */
2301
2302	/* we're done, let's put it on the wire */
2303#if NBPFILTER > 0
2304	if (ifp->if_bpf) {
2305		m->m_data += sizeof(*ip);
2306		m->m_len = m->m_pkthdr.len = sc->sc_len - sizeof(*ip);
2307#ifdef __FreeBSD__
2308		BPF_MTAP(ifp, m);
2309#else
2310		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2311#endif
2312		m->m_data -= sizeof(*ip);
2313		m->m_len = m->m_pkthdr.len = sc->sc_len;
2314	}
2315
2316	if (sc->sc_sync_if == NULL) {
2317		sc->sc_len = PFSYNC_MINPKT;
2318		m_freem(m);
2319		return;
2320	}
2321#endif
2322
2323#ifdef __FreeBSD__
2324	sc->sc_ifp->if_opackets++;
2325	sc->sc_ifp->if_obytes += m->m_pkthdr.len;
2326	sc->sc_len = PFSYNC_MINPKT;
2327
2328	IFQ_ENQUEUE(&sc->sc_ifp->if_snd, m, dummy_error);
2329	swi_sched(V_pfsync_swi_cookie, 0);
2330#else
2331	sc->sc_if.if_opackets++;
2332	sc->sc_if.if_obytes += m->m_pkthdr.len;
2333
2334	if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL) == 0)
2335		pfsyncstats.pfsyncs_opackets++;
2336	else
2337		pfsyncstats.pfsyncs_oerrors++;
2338
2339	/* start again */
2340	sc->sc_len = PFSYNC_MINPKT;
2341#endif
2342}
2343
2344void
2345pfsync_insert_state(struct pf_state *st)
2346{
2347#ifdef __FreeBSD__
2348	struct pfsync_softc *sc = V_pfsyncif;
2349#else
2350	struct pfsync_softc *sc = pfsyncif;
2351#endif
2352
2353#ifdef __FreeBSD__
2354	PF_LOCK_ASSERT();
2355#else
2356	splassert(IPL_SOFTNET);
2357#endif
2358
2359	if (ISSET(st->rule.ptr->rule_flag, PFRULE_NOSYNC) ||
2360	    st->key[PF_SK_WIRE]->proto == IPPROTO_PFSYNC) {
2361		SET(st->state_flags, PFSTATE_NOSYNC);
2362		return;
2363	}
2364
2365	if (sc == NULL || ISSET(st->state_flags, PFSTATE_NOSYNC))
2366		return;
2367
2368#ifdef PFSYNC_DEBUG
2369#ifdef __FreeBSD__
2370	KASSERT(st->sync_state == PFSYNC_S_NONE,
2371		("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
2372#else
2373	KASSERT(st->sync_state == PFSYNC_S_NONE);
2374#endif
2375#endif
2376
2377	if (sc->sc_len == PFSYNC_MINPKT)
2378#ifdef __FreeBSD__
2379		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2380		    V_pfsyncif);
2381#else
2382		timeout_add_sec(&sc->sc_tmo, 1);
2383#endif
2384
2385	pfsync_q_ins(st, PFSYNC_S_INS);
2386
2387	if (ISSET(st->state_flags, PFSTATE_ACK))
2388#ifdef __FreeBSD__
2389		pfsync_sendout();
2390#else
2391		schednetisr(NETISR_PFSYNC);
2392#endif
2393	else
2394		st->sync_updates = 0;
2395}
2396
2397int defer = 10;
2398
2399int
2400pfsync_defer(struct pf_state *st, struct mbuf *m)
2401{
2402#ifdef __FreeBSD__
2403	struct pfsync_softc *sc = V_pfsyncif;
2404#else
2405	struct pfsync_softc *sc = pfsyncif;
2406#endif
2407	struct pfsync_deferral *pd;
2408
2409#ifdef __FreeBSD__
2410	PF_LOCK_ASSERT();
2411#else
2412	splassert(IPL_SOFTNET);
2413#endif
2414
2415	if (sc->sc_deferred >= 128)
2416		pfsync_undefer(TAILQ_FIRST(&sc->sc_deferrals), 0);
2417
2418	pd = pool_get(&sc->sc_pool, M_NOWAIT);
2419	if (pd == NULL)
2420		return (0);
2421	sc->sc_deferred++;
2422
2423#ifdef __FreeBSD__
2424	m->m_flags |= M_SKIP_FIREWALL;
2425#else
2426	m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
2427#endif
2428	SET(st->state_flags, PFSTATE_ACK);
2429
2430	pd->pd_st = st;
2431	pd->pd_m = m;
2432
2433	TAILQ_INSERT_TAIL(&sc->sc_deferrals, pd, pd_entry);
2434#ifdef __FreeBSD__
2435	callout_init(&pd->pd_tmo, CALLOUT_MPSAFE);
2436	callout_reset(&pd->pd_tmo, defer, pfsync_defer_tmo,
2437		pd);
2438#else
2439	timeout_set(&pd->pd_tmo, pfsync_defer_tmo, pd);
2440	timeout_add(&pd->pd_tmo, defer);
2441#endif
2442
2443	return (1);
2444}
2445
2446void
2447pfsync_undefer(struct pfsync_deferral *pd, int drop)
2448{
2449#ifdef __FreeBSD__
2450	struct pfsync_softc *sc = V_pfsyncif;
2451#else
2452	struct pfsync_softc *sc = pfsyncif;
2453#endif
2454	int s;
2455
2456#ifdef __FreeBSD__
2457	PF_LOCK_ASSERT();
2458#else
2459	splassert(IPL_SOFTNET);
2460#endif
2461
2462	TAILQ_REMOVE(&sc->sc_deferrals, pd, pd_entry);
2463	sc->sc_deferred--;
2464
2465	CLR(pd->pd_st->state_flags, PFSTATE_ACK);
2466	timeout_del(&pd->pd_tmo); /* bah */
2467	if (drop)
2468		m_freem(pd->pd_m);
2469	else {
2470		s = splnet();
2471#ifdef __FreeBSD__
2472		/* XXX: use pf_defered?! */
2473		PF_UNLOCK();
2474#endif
2475		ip_output(pd->pd_m, (void *)NULL, (void *)NULL, 0,
2476		    (void *)NULL, (void *)NULL);
2477#ifdef __FreeBSD__
2478		PF_LOCK();
2479#endif
2480		splx(s);
2481	}
2482
2483	pool_put(&sc->sc_pool, pd);
2484}
2485
2486void
2487pfsync_defer_tmo(void *arg)
2488{
2489#if defined(__FreeBSD__) && defined(VIMAGE)
2490	struct pfsync_deferral *pd = arg;
2491#endif
2492	int s;
2493
2494	s = splsoftnet();
2495#ifdef __FreeBSD__
2496	CURVNET_SET(pd->pd_m->m_pkthdr.rcvif->if_vnet); /* XXX */
2497	PF_LOCK();
2498#endif
2499	pfsync_undefer(arg, 0);
2500#ifdef __FreeBSD__
2501	PF_UNLOCK();
2502	CURVNET_RESTORE();
2503#endif
2504	splx(s);
2505}
2506
2507void
2508pfsync_deferred(struct pf_state *st, int drop)
2509{
2510#ifdef __FreeBSD__
2511	struct pfsync_softc *sc = V_pfsyncif;
2512#else
2513	struct pfsync_softc *sc = pfsyncif;
2514#endif
2515	struct pfsync_deferral *pd;
2516
2517	TAILQ_FOREACH(pd, &sc->sc_deferrals, pd_entry) {
2518		 if (pd->pd_st == st) {
2519			pfsync_undefer(pd, drop);
2520			return;
2521		}
2522	}
2523
2524	panic("pfsync_send_deferred: unable to find deferred state");
2525}
2526
2527u_int pfsync_upds = 0;
2528
2529void
2530pfsync_update_state(struct pf_state *st)
2531{
2532#ifdef __FreeBSD__
2533	struct pfsync_softc *sc = V_pfsyncif;
2534#else
2535	struct pfsync_softc *sc = pfsyncif;
2536#endif
2537	int sync = 0;
2538
2539#ifdef __FreeBSD__
2540	PF_LOCK_ASSERT();
2541#else
2542	splassert(IPL_SOFTNET);
2543#endif
2544
2545	if (sc == NULL)
2546		return;
2547
2548	if (ISSET(st->state_flags, PFSTATE_ACK))
2549		pfsync_deferred(st, 0);
2550	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2551		if (st->sync_state != PFSYNC_S_NONE)
2552			pfsync_q_del(st);
2553		return;
2554	}
2555
2556	if (sc->sc_len == PFSYNC_MINPKT)
2557#ifdef __FreeBSD__
2558		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2559		    V_pfsyncif);
2560#else
2561		timeout_add_sec(&sc->sc_tmo, 1);
2562#endif
2563
2564	switch (st->sync_state) {
2565	case PFSYNC_S_UPD_C:
2566	case PFSYNC_S_UPD:
2567	case PFSYNC_S_INS:
2568		/* we're already handling it */
2569
2570		if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP) {
2571			st->sync_updates++;
2572			if (st->sync_updates >= sc->sc_maxupdates)
2573				sync = 1;
2574		}
2575		break;
2576
2577	case PFSYNC_S_IACK:
2578		pfsync_q_del(st);
2579	case PFSYNC_S_NONE:
2580		pfsync_q_ins(st, PFSYNC_S_UPD_C);
2581		st->sync_updates = 0;
2582		break;
2583
2584	default:
2585		panic("pfsync_update_state: unexpected sync state %d",
2586		    st->sync_state);
2587	}
2588
2589	if (sync || (time_uptime - st->pfsync_time) < 2) {
2590		pfsync_upds++;
2591#ifdef __FreeBSD__
2592		pfsync_sendout();
2593#else
2594		schednetisr(NETISR_PFSYNC);
2595#endif
2596	}
2597}
2598
2599void
2600pfsync_request_update(u_int32_t creatorid, u_int64_t id)
2601{
2602#ifdef __FreeBSD__
2603	struct pfsync_softc *sc = V_pfsyncif;
2604#else
2605	struct pfsync_softc *sc = pfsyncif;
2606#endif
2607	struct pfsync_upd_req_item *item;
2608	size_t nlen = sizeof(struct pfsync_upd_req);
2609	int s;
2610
2611	PF_LOCK_ASSERT();
2612
2613	/*
2614	 * this code does nothing to prevent multiple update requests for the
2615	 * same state being generated.
2616	 */
2617
2618	item = pool_get(&sc->sc_pool, PR_NOWAIT);
2619	if (item == NULL) {
2620		/* XXX stats */
2621		return;
2622	}
2623
2624	item->ur_msg.id = id;
2625	item->ur_msg.creatorid = creatorid;
2626
2627	if (TAILQ_EMPTY(&sc->sc_upd_req_list))
2628		nlen += sizeof(struct pfsync_subheader);
2629
2630#ifdef __FreeBSD__
2631	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
2632#else
2633	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2634#endif
2635		s = splnet();
2636		pfsync_sendout();
2637		splx(s);
2638
2639		nlen = sizeof(struct pfsync_subheader) +
2640		    sizeof(struct pfsync_upd_req);
2641	}
2642
2643	TAILQ_INSERT_TAIL(&sc->sc_upd_req_list, item, ur_entry);
2644	sc->sc_len += nlen;
2645
2646#ifdef __FreeBSD__
2647	pfsync_sendout();
2648#else
2649	schednetisr(NETISR_PFSYNC);
2650#endif
2651}
2652
2653void
2654pfsync_update_state_req(struct pf_state *st)
2655{
2656#ifdef __FreeBSD__
2657	struct pfsync_softc *sc = V_pfsyncif;
2658#else
2659	struct pfsync_softc *sc = pfsyncif;
2660#endif
2661
2662	PF_LOCK_ASSERT();
2663
2664	if (sc == NULL)
2665		panic("pfsync_update_state_req: nonexistant instance");
2666
2667	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2668		if (st->sync_state != PFSYNC_S_NONE)
2669			pfsync_q_del(st);
2670		return;
2671	}
2672
2673	switch (st->sync_state) {
2674	case PFSYNC_S_UPD_C:
2675	case PFSYNC_S_IACK:
2676		pfsync_q_del(st);
2677	case PFSYNC_S_NONE:
2678		pfsync_q_ins(st, PFSYNC_S_UPD);
2679#ifdef __FreeBSD__
2680		pfsync_sendout();
2681#else
2682		schednetisr(NETISR_PFSYNC);
2683#endif
2684		return;
2685
2686	case PFSYNC_S_INS:
2687	case PFSYNC_S_UPD:
2688	case PFSYNC_S_DEL:
2689		/* we're already handling it */
2690		return;
2691
2692	default:
2693		panic("pfsync_update_state_req: unexpected sync state %d",
2694		    st->sync_state);
2695	}
2696}
2697
2698void
2699pfsync_delete_state(struct pf_state *st)
2700{
2701#ifdef __FreeBSD__
2702	struct pfsync_softc *sc = V_pfsyncif;
2703#else
2704	struct pfsync_softc *sc = pfsyncif;
2705#endif
2706
2707#ifdef __FreeBSD__
2708	PF_LOCK_ASSERT();
2709#else
2710	splassert(IPL_SOFTNET);
2711#endif
2712
2713	if (sc == NULL)
2714		return;
2715
2716	if (ISSET(st->state_flags, PFSTATE_ACK))
2717		pfsync_deferred(st, 1);
2718	if (ISSET(st->state_flags, PFSTATE_NOSYNC)) {
2719		if (st->sync_state != PFSYNC_S_NONE)
2720			pfsync_q_del(st);
2721		return;
2722	}
2723
2724	if (sc->sc_len == PFSYNC_MINPKT)
2725#ifdef __FreeBSD__
2726		callout_reset(&sc->sc_tmo, 1 * hz, pfsync_timeout,
2727		    V_pfsyncif);
2728#else
2729		timeout_add_sec(&sc->sc_tmo, 1);
2730#endif
2731
2732	switch (st->sync_state) {
2733	case PFSYNC_S_INS:
2734		/* we never got to tell the world so just forget about it */
2735		pfsync_q_del(st);
2736		return;
2737
2738	case PFSYNC_S_UPD_C:
2739	case PFSYNC_S_UPD:
2740	case PFSYNC_S_IACK:
2741		pfsync_q_del(st);
2742		/* FALLTHROUGH to putting it on the del list */
2743
2744	case PFSYNC_S_NONE:
2745		pfsync_q_ins(st, PFSYNC_S_DEL);
2746		return;
2747
2748	default:
2749		panic("pfsync_delete_state: unexpected sync state %d",
2750		    st->sync_state);
2751	}
2752}
2753
2754void
2755pfsync_clear_states(u_int32_t creatorid, const char *ifname)
2756{
2757	struct {
2758		struct pfsync_subheader subh;
2759		struct pfsync_clr clr;
2760	} __packed r;
2761
2762#ifdef __FreeBSD__
2763	struct pfsync_softc *sc = V_pfsyncif;
2764#else
2765	struct pfsync_softc *sc = pfsyncif;
2766#endif
2767
2768#ifdef __FreeBSD__
2769	PF_LOCK_ASSERT();
2770#else
2771	splassert(IPL_SOFTNET);
2772#endif
2773
2774	if (sc == NULL)
2775		return;
2776
2777	bzero(&r, sizeof(r));
2778
2779	r.subh.action = PFSYNC_ACT_CLR;
2780	r.subh.count = htons(1);
2781
2782	strlcpy(r.clr.ifname, ifname, sizeof(r.clr.ifname));
2783	r.clr.creatorid = creatorid;
2784
2785	pfsync_send_plus(&r, sizeof(r));
2786}
2787
2788void
2789pfsync_q_ins(struct pf_state *st, int q)
2790{
2791#ifdef __FreeBSD__
2792	struct pfsync_softc *sc = V_pfsyncif;
2793#else
2794	struct pfsync_softc *sc = pfsyncif;
2795#endif
2796	size_t nlen = pfsync_qs[q].len;
2797	int s;
2798
2799	PF_LOCK_ASSERT();
2800
2801#ifdef __FreeBSD__
2802	KASSERT(st->sync_state == PFSYNC_S_NONE,
2803		("%s: st->sync_state == PFSYNC_S_NONE", __FUNCTION__));
2804#else
2805	KASSERT(st->sync_state == PFSYNC_S_NONE);
2806#endif
2807
2808#if 1 || defined(PFSYNC_DEBUG)
2809	if (sc->sc_len < PFSYNC_MINPKT)
2810#ifdef __FreeBSD__
2811		panic("pfsync pkt len is too low %zu", sc->sc_len);
2812#else
2813		panic("pfsync pkt len is too low %d", sc->sc_len);
2814#endif
2815#endif
2816	if (TAILQ_EMPTY(&sc->sc_qs[q]))
2817		nlen += sizeof(struct pfsync_subheader);
2818
2819#ifdef __FreeBSD__
2820	if (sc->sc_len + nlen > sc->sc_ifp->if_mtu) {
2821#else
2822	if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2823#endif
2824		s = splnet();
2825		pfsync_sendout();
2826		splx(s);
2827
2828		nlen = sizeof(struct pfsync_subheader) + pfsync_qs[q].len;
2829	}
2830
2831	sc->sc_len += nlen;
2832	TAILQ_INSERT_TAIL(&sc->sc_qs[q], st, sync_list);
2833	st->sync_state = q;
2834}
2835
2836void
2837pfsync_q_del(struct pf_state *st)
2838{
2839#ifdef __FreeBSD__
2840	struct pfsync_softc *sc = V_pfsyncif;
2841#else
2842	struct pfsync_softc *sc = pfsyncif;
2843#endif
2844	int q = st->sync_state;
2845
2846#ifdef __FreeBSD__
2847	KASSERT(st->sync_state != PFSYNC_S_NONE,
2848		("%s: st->sync_state != PFSYNC_S_NONE", __FUNCTION__));
2849#else
2850	KASSERT(st->sync_state != PFSYNC_S_NONE);
2851#endif
2852
2853	sc->sc_len -= pfsync_qs[q].len;
2854	TAILQ_REMOVE(&sc->sc_qs[q], st, sync_list);
2855	st->sync_state = PFSYNC_S_NONE;
2856
2857	if (TAILQ_EMPTY(&sc->sc_qs[q]))
2858		sc->sc_len -= sizeof(struct pfsync_subheader);
2859}
2860
2861#ifdef notyet
2862void
2863pfsync_update_tdb(struct tdb *t, int output)
2864{
2865#ifdef __FreeBSD__
2866	struct pfsync_softc *sc = V_pfsyncif;
2867#else
2868	struct pfsync_softc *sc = pfsyncif;
2869#endif
2870	size_t nlen = sizeof(struct pfsync_tdb);
2871	int s;
2872
2873	if (sc == NULL)
2874		return;
2875
2876	if (!ISSET(t->tdb_flags, TDBF_PFSYNC)) {
2877		if (TAILQ_EMPTY(&sc->sc_tdb_q))
2878			nlen += sizeof(struct pfsync_subheader);
2879
2880		if (sc->sc_len + nlen > sc->sc_if.if_mtu) {
2881			s = splnet();
2882			PF_LOCK();
2883			pfsync_sendout();
2884			PF_UNLOCK();
2885			splx(s);
2886
2887			nlen = sizeof(struct pfsync_subheader) +
2888			    sizeof(struct pfsync_tdb);
2889		}
2890
2891		sc->sc_len += nlen;
2892		TAILQ_INSERT_TAIL(&sc->sc_tdb_q, t, tdb_sync_entry);
2893		SET(t->tdb_flags, TDBF_PFSYNC);
2894		t->tdb_updates = 0;
2895	} else {
2896		if (++t->tdb_updates >= sc->sc_maxupdates)
2897			schednetisr(NETISR_PFSYNC);
2898	}
2899
2900	if (output)
2901		SET(t->tdb_flags, TDBF_PFSYNC_RPL);
2902	else
2903		CLR(t->tdb_flags, TDBF_PFSYNC_RPL);
2904}
2905
2906void
2907pfsync_delete_tdb(struct tdb *t)
2908{
2909#ifdef __FreeBSD__
2910	struct pfsync_softc *sc = V_pfsyncif;
2911#else
2912	struct pfsync_softc *sc = pfsyncif;
2913#endif
2914
2915	if (sc == NULL || !ISSET(t->tdb_flags, TDBF_PFSYNC))
2916		return;
2917
2918	sc->sc_len -= sizeof(struct pfsync_tdb);
2919	TAILQ_REMOVE(&sc->sc_tdb_q, t, tdb_sync_entry);
2920	CLR(t->tdb_flags, TDBF_PFSYNC);
2921
2922	if (TAILQ_EMPTY(&sc->sc_tdb_q))
2923		sc->sc_len -= sizeof(struct pfsync_subheader);
2924}
2925
2926int
2927pfsync_out_tdb(struct tdb *t, struct mbuf *m, int offset)
2928{
2929	struct pfsync_tdb *ut = (struct pfsync_tdb *)(m->m_data + offset);
2930
2931	bzero(ut, sizeof(*ut));
2932	ut->spi = t->tdb_spi;
2933	bcopy(&t->tdb_dst, &ut->dst, sizeof(ut->dst));
2934	/*
2935	 * When a failover happens, the master's rpl is probably above
2936	 * what we see here (we may be up to a second late), so
2937	 * increase it a bit for outbound tdbs to manage most such
2938	 * situations.
2939	 *
2940	 * For now, just add an offset that is likely to be larger
2941	 * than the number of packets we can see in one second. The RFC
2942	 * just says the next packet must have a higher seq value.
2943	 *
2944	 * XXX What is a good algorithm for this? We could use
2945	 * a rate-determined increase, but to know it, we would have
2946	 * to extend struct tdb.
2947	 * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
2948	 * will soon be replaced anyway. For now, just don't handle
2949	 * this edge case.
2950	 */
2951#define RPL_INCR 16384
2952	ut->rpl = htonl(t->tdb_rpl + (ISSET(t->tdb_flags, TDBF_PFSYNC_RPL) ?
2953	    RPL_INCR : 0));
2954	ut->cur_bytes = htobe64(t->tdb_cur_bytes);
2955	ut->sproto = t->tdb_sproto;
2956
2957	return (sizeof(*ut));
2958}
2959#endif
2960
2961void
2962pfsync_bulk_start(void)
2963{
2964#ifdef __FreeBSD__
2965	struct pfsync_softc *sc = V_pfsyncif;
2966#else
2967	struct pfsync_softc *sc = pfsyncif;
2968#endif
2969
2970#ifdef __FreeBSD__
2971	if (V_pf_status.debug >= PF_DEBUG_MISC)
2972#else
2973	if (pf_status.debug >= PF_DEBUG_MISC)
2974#endif
2975		printf("pfsync: received bulk update request\n");
2976
2977#ifdef __FreeBSD__
2978	PF_LOCK();
2979	if (TAILQ_EMPTY(&V_state_list))
2980#else
2981	if (TAILQ_EMPTY(&state_list))
2982#endif
2983		pfsync_bulk_status(PFSYNC_BUS_END);
2984	else {
2985		sc->sc_ureq_received = time_uptime;
2986		if (sc->sc_bulk_next == NULL)
2987#ifdef __FreeBSD__
2988			sc->sc_bulk_next = TAILQ_FIRST(&V_state_list);
2989#else
2990			sc->sc_bulk_next = TAILQ_FIRST(&state_list);
2991#endif
2992		sc->sc_bulk_last = sc->sc_bulk_next;
2993
2994		pfsync_bulk_status(PFSYNC_BUS_START);
2995		callout_reset(&sc->sc_bulk_tmo, 1, pfsync_bulk_update, sc);
2996	}
2997#ifdef __FreeBSD__
2998	PF_UNLOCK();
2999#endif
3000}
3001
3002void
3003pfsync_bulk_update(void *arg)
3004{
3005	struct pfsync_softc *sc = arg;
3006	struct pf_state *st = sc->sc_bulk_next;
3007	int i = 0;
3008	int s;
3009
3010	PF_LOCK_ASSERT();
3011
3012	s = splsoftnet();
3013#ifdef __FreeBSD__
3014	CURVNET_SET(sc->sc_ifp->if_vnet);
3015#endif
3016	for (;;) {
3017		if (st->sync_state == PFSYNC_S_NONE &&
3018		    st->timeout < PFTM_MAX &&
3019		    st->pfsync_time <= sc->sc_ureq_received) {
3020			pfsync_update_state_req(st);
3021			i++;
3022		}
3023
3024		st = TAILQ_NEXT(st, entry_list);
3025		if (st == NULL)
3026#ifdef __FreeBSD__
3027			st = TAILQ_FIRST(&V_state_list);
3028#else
3029			st = TAILQ_FIRST(&state_list);
3030#endif
3031
3032		if (st == sc->sc_bulk_last) {
3033			/* we're done */
3034			sc->sc_bulk_next = NULL;
3035			sc->sc_bulk_last = NULL;
3036			pfsync_bulk_status(PFSYNC_BUS_END);
3037			break;
3038		}
3039
3040#ifdef __FreeBSD__
3041		if (i > 1 && (sc->sc_ifp->if_mtu - sc->sc_len) <
3042#else
3043		if (i > 1 && (sc->sc_if.if_mtu - sc->sc_len) <
3044#endif
3045		    sizeof(struct pfsync_state)) {
3046			/* we've filled a packet */
3047			sc->sc_bulk_next = st;
3048#ifdef __FreeBSD__
3049			callout_reset(&sc->sc_bulk_tmo, 1,
3050			    pfsync_bulk_update, sc);
3051#else
3052			timeout_add(&sc->sc_bulk_tmo, 1);
3053#endif
3054			break;
3055		}
3056	}
3057
3058#ifdef __FreeBSD__
3059	CURVNET_RESTORE();
3060#endif
3061	splx(s);
3062}
3063
3064void
3065pfsync_bulk_status(u_int8_t status)
3066{
3067	struct {
3068		struct pfsync_subheader subh;
3069		struct pfsync_bus bus;
3070	} __packed r;
3071
3072#ifdef __FreeBSD__
3073	struct pfsync_softc *sc = V_pfsyncif;
3074#else
3075	struct pfsync_softc *sc = pfsyncif;
3076#endif
3077
3078	PF_LOCK_ASSERT();
3079
3080	bzero(&r, sizeof(r));
3081
3082	r.subh.action = PFSYNC_ACT_BUS;
3083	r.subh.count = htons(1);
3084
3085#ifdef __FreeBSD__
3086	r.bus.creatorid = V_pf_status.hostid;
3087#else
3088	r.bus.creatorid = pf_status.hostid;
3089#endif
3090	r.bus.endtime = htonl(time_uptime - sc->sc_ureq_received);
3091	r.bus.status = status;
3092
3093	pfsync_send_plus(&r, sizeof(r));
3094}
3095
3096void
3097pfsync_bulk_fail(void *arg)
3098{
3099	struct pfsync_softc *sc = arg;
3100
3101#ifdef __FreeBSD__
3102	CURVNET_SET(sc->sc_ifp->if_vnet);
3103#endif
3104
3105	if (sc->sc_bulk_tries++ < PFSYNC_MAX_BULKTRIES) {
3106		/* Try again */
3107#ifdef __FreeBSD__
3108		callout_reset(&sc->sc_bulkfail_tmo, 5 * hz,
3109		    pfsync_bulk_fail, V_pfsyncif);
3110#else
3111		timeout_add_sec(&sc->sc_bulkfail_tmo, 5);
3112#endif
3113		PF_LOCK();
3114		pfsync_request_update(0, 0);
3115		PF_UNLOCK();
3116	} else {
3117		/* Pretend like the transfer was ok */
3118		sc->sc_ureq_sent = 0;
3119		sc->sc_bulk_tries = 0;
3120#ifdef __FreeBSD__
3121		if (!sc->pfsync_sync_ok && carp_demote_adj_p)
3122			(*carp_demote_adj_p)(-V_pfsync_carp_adj,
3123			    "pfsync bulk fail");
3124		sc->pfsync_sync_ok = 1;
3125#else
3126#if NCARP > 0
3127		if (!pfsync_sync_ok)
3128			carp_group_demote_adj(&sc->sc_if, -1);
3129#endif
3130		pfsync_sync_ok = 1;
3131#endif
3132#ifdef __FreeBSD__
3133		if (V_pf_status.debug >= PF_DEBUG_MISC)
3134#else
3135		if (pf_status.debug >= PF_DEBUG_MISC)
3136#endif
3137			printf("pfsync: failed to receive bulk update\n");
3138	}
3139
3140#ifdef __FreeBSD__
3141	CURVNET_RESTORE();
3142#endif
3143}
3144
3145void
3146pfsync_send_plus(void *plus, size_t pluslen)
3147{
3148#ifdef __FreeBSD__
3149	struct pfsync_softc *sc = V_pfsyncif;
3150#else
3151	struct pfsync_softc *sc = pfsyncif;
3152#endif
3153	int s;
3154
3155	PF_LOCK_ASSERT();
3156
3157#ifdef __FreeBSD__
3158	if (sc->sc_len + pluslen > sc->sc_ifp->if_mtu) {
3159#else
3160	if (sc->sc_len + pluslen > sc->sc_if.if_mtu) {
3161#endif
3162		s = splnet();
3163		pfsync_sendout();
3164		splx(s);
3165	}
3166
3167	sc->sc_plus = plus;
3168	sc->sc_len += (sc->sc_pluslen = pluslen);
3169
3170	s = splnet();
3171	pfsync_sendout();
3172	splx(s);
3173}
3174
3175int
3176pfsync_up(void)
3177{
3178#ifdef __FreeBSD__
3179	struct pfsync_softc *sc = V_pfsyncif;
3180#else
3181	struct pfsync_softc *sc = pfsyncif;
3182#endif
3183
3184#ifdef __FreeBSD__
3185	if (sc == NULL || !ISSET(sc->sc_ifp->if_flags, IFF_DRV_RUNNING))
3186#else
3187	if (sc == NULL || !ISSET(sc->sc_if.if_flags, IFF_RUNNING))
3188#endif
3189		return (0);
3190
3191	return (1);
3192}
3193
3194int
3195pfsync_state_in_use(struct pf_state *st)
3196{
3197#ifdef __FreeBSD__
3198	struct pfsync_softc *sc = V_pfsyncif;
3199#else
3200	struct pfsync_softc *sc = pfsyncif;
3201#endif
3202
3203	if (sc == NULL)
3204		return (0);
3205
3206	if (st->sync_state != PFSYNC_S_NONE ||
3207	    st == sc->sc_bulk_next ||
3208	    st == sc->sc_bulk_last)
3209		return (1);
3210
3211	return (0);
3212}
3213
3214u_int pfsync_ints;
3215u_int pfsync_tmos;
3216
3217void
3218pfsync_timeout(void *arg)
3219{
3220#if defined(__FreeBSD__) && defined(VIMAGE)
3221	struct pfsync_softc *sc = arg;
3222#endif
3223	int s;
3224
3225#ifdef __FreeBSD__
3226	CURVNET_SET(sc->sc_ifp->if_vnet);
3227#endif
3228
3229	pfsync_tmos++;
3230
3231	s = splnet();
3232#ifdef __FreeBSD__
3233	PF_LOCK();
3234#endif
3235	pfsync_sendout();
3236#ifdef __FreeBSD__
3237	PF_UNLOCK();
3238#endif
3239	splx(s);
3240
3241#ifdef __FreeBSD__
3242	CURVNET_RESTORE();
3243#endif
3244}
3245
3246/* this is a softnet/netisr handler */
3247void
3248#ifdef __FreeBSD__
3249pfsyncintr(void *arg)
3250{
3251	struct pfsync_softc *sc = arg;
3252	struct mbuf *m, *n;
3253
3254	CURVNET_SET(sc->sc_ifp->if_vnet);
3255	pfsync_ints++;
3256
3257	IF_DEQUEUE_ALL(&sc->sc_ifp->if_snd, m);
3258
3259	for (; m != NULL; m = n) {
3260
3261		n = m->m_nextpkt;
3262		m->m_nextpkt = NULL;
3263		if (ip_output(m, NULL, NULL, IP_RAWOUTPUT, &sc->sc_imo, NULL)
3264		    == 0)
3265			V_pfsyncstats.pfsyncs_opackets++;
3266		else
3267			V_pfsyncstats.pfsyncs_oerrors++;
3268	}
3269	CURVNET_RESTORE();
3270}
3271#else
3272pfsyncintr(void)
3273{
3274	int s;
3275
3276	pfsync_ints++;
3277
3278	s = splnet();
3279	pfsync_sendout();
3280	splx(s);
3281}
3282#endif
3283
3284int
3285pfsync_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
3286    size_t newlen)
3287{
3288
3289#ifdef notyet
3290	/* All sysctl names at this level are terminal. */
3291	if (namelen != 1)
3292		return (ENOTDIR);
3293
3294	switch (name[0]) {
3295	case PFSYNCCTL_STATS:
3296		if (newp != NULL)
3297			return (EPERM);
3298		return (sysctl_struct(oldp, oldlenp, newp, newlen,
3299		    &V_pfsyncstats, sizeof(V_pfsyncstats)));
3300	}
3301#endif
3302	return (ENOPROTOOPT);
3303}
3304
3305#ifdef __FreeBSD__
3306static int
3307pfsync_multicast_setup(struct pfsync_softc *sc)
3308{
3309	struct ip_moptions *imo = &sc->sc_imo;
3310	int error;
3311
3312	if (!(sc->sc_sync_if->if_flags & IFF_MULTICAST)) {
3313		sc->sc_sync_if = NULL;
3314		return (EADDRNOTAVAIL);
3315	}
3316
3317	imo->imo_membership = (struct in_multi **)malloc(
3318	    (sizeof(struct in_multi *) * IP_MIN_MEMBERSHIPS), M_PFSYNC,
3319	    M_WAITOK | M_ZERO);
3320	imo->imo_max_memberships = IP_MIN_MEMBERSHIPS;
3321	imo->imo_multicast_vif = -1;
3322
3323	if ((error = in_joingroup(sc->sc_sync_if, &sc->sc_sync_peer, NULL,
3324	    &imo->imo_membership[0])) != 0) {
3325		free(imo->imo_membership, M_PFSYNC);
3326		return (error);
3327	}
3328	imo->imo_num_memberships++;
3329	imo->imo_multicast_ifp = sc->sc_sync_if;
3330	imo->imo_multicast_ttl = PFSYNC_DFLTTL;
3331	imo->imo_multicast_loop = 0;
3332
3333	return (0);
3334}
3335
3336static void
3337pfsync_multicast_cleanup(struct pfsync_softc *sc)
3338{
3339	struct ip_moptions *imo = &sc->sc_imo;
3340
3341	in_leavegroup(imo->imo_membership[0], NULL);
3342	free(imo->imo_membership, M_PFSYNC);
3343	imo->imo_membership = NULL;
3344	imo->imo_multicast_ifp = NULL;
3345}
3346
3347#ifdef INET
3348extern  struct domain inetdomain;
3349static struct protosw in_pfsync_protosw = {
3350	.pr_type =              SOCK_RAW,
3351	.pr_domain =            &inetdomain,
3352	.pr_protocol =          IPPROTO_PFSYNC,
3353	.pr_flags =             PR_ATOMIC|PR_ADDR,
3354	.pr_input =             pfsync_input,
3355	.pr_output =            (pr_output_t *)rip_output,
3356	.pr_ctloutput =         rip_ctloutput,
3357	.pr_usrreqs =           &rip_usrreqs
3358};
3359#endif
3360
3361static int
3362pfsync_init()
3363{
3364	VNET_ITERATOR_DECL(vnet_iter);
3365	int error = 0;
3366
3367	VNET_LIST_RLOCK();
3368	VNET_FOREACH(vnet_iter) {
3369		CURVNET_SET(vnet_iter);
3370		V_pfsync_cloner = pfsync_cloner;
3371		V_pfsync_cloner_data = pfsync_cloner_data;
3372		V_pfsync_cloner.ifc_data = &V_pfsync_cloner_data;
3373		if_clone_attach(&V_pfsync_cloner);
3374		error = swi_add(NULL, "pfsync", pfsyncintr, V_pfsyncif,
3375		    SWI_NET, INTR_MPSAFE, &V_pfsync_swi_cookie);
3376		CURVNET_RESTORE();
3377		if (error)
3378			goto fail_locked;
3379	}
3380	VNET_LIST_RUNLOCK();
3381#ifdef INET
3382	error = pf_proto_register(PF_INET, &in_pfsync_protosw);
3383	if (error)
3384		goto fail;
3385	error = ipproto_register(IPPROTO_PFSYNC);
3386	if (error) {
3387		pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
3388		goto fail;
3389	}
3390#endif
3391	PF_LOCK();
3392	pfsync_state_import_ptr = pfsync_state_import;
3393	pfsync_up_ptr = pfsync_up;
3394	pfsync_insert_state_ptr = pfsync_insert_state;
3395	pfsync_update_state_ptr = pfsync_update_state;
3396	pfsync_delete_state_ptr = pfsync_delete_state;
3397	pfsync_clear_states_ptr = pfsync_clear_states;
3398	pfsync_state_in_use_ptr = pfsync_state_in_use;
3399	pfsync_defer_ptr = pfsync_defer;
3400	PF_UNLOCK();
3401
3402	return (0);
3403
3404fail:
3405	VNET_LIST_RLOCK();
3406fail_locked:
3407	VNET_FOREACH(vnet_iter) {
3408		CURVNET_SET(vnet_iter);
3409		if (V_pfsync_swi_cookie) {
3410			swi_remove(V_pfsync_swi_cookie);
3411			if_clone_detach(&V_pfsync_cloner);
3412		}
3413		CURVNET_RESTORE();
3414	}
3415	VNET_LIST_RUNLOCK();
3416
3417	return (error);
3418}
3419
3420static void
3421pfsync_uninit()
3422{
3423	VNET_ITERATOR_DECL(vnet_iter);
3424
3425	PF_LOCK();
3426	pfsync_state_import_ptr = NULL;
3427	pfsync_up_ptr = NULL;
3428	pfsync_insert_state_ptr = NULL;
3429	pfsync_update_state_ptr = NULL;
3430	pfsync_delete_state_ptr = NULL;
3431	pfsync_clear_states_ptr = NULL;
3432	pfsync_state_in_use_ptr = NULL;
3433	pfsync_defer_ptr = NULL;
3434	PF_UNLOCK();
3435
3436	ipproto_unregister(IPPROTO_PFSYNC);
3437	pf_proto_unregister(PF_INET, IPPROTO_PFSYNC, SOCK_RAW);
3438	VNET_LIST_RLOCK();
3439	VNET_FOREACH(vnet_iter) {
3440		CURVNET_SET(vnet_iter);
3441		swi_remove(V_pfsync_swi_cookie);
3442		if_clone_detach(&V_pfsync_cloner);
3443		CURVNET_RESTORE();
3444	}
3445	VNET_LIST_RUNLOCK();
3446}
3447
3448static int
3449pfsync_modevent(module_t mod, int type, void *data)
3450{
3451	int error = 0;
3452
3453	switch (type) {
3454	case MOD_LOAD:
3455		error = pfsync_init();
3456		break;
3457	case MOD_QUIESCE:
3458		/*
3459		 * Module should not be unloaded due to race conditions.
3460		 */
3461		error = EPERM;
3462		break;
3463	case MOD_UNLOAD:
3464		pfsync_uninit();
3465		break;
3466	default:
3467		error = EINVAL;
3468		break;
3469	}
3470
3471	return (error);
3472}
3473
3474static moduledata_t pfsync_mod = {
3475	"pfsync",
3476	pfsync_modevent,
3477	0
3478};
3479
3480#define PFSYNC_MODVER 1
3481
3482DECLARE_MODULE(pfsync, pfsync_mod, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY);
3483MODULE_VERSION(pfsync, PFSYNC_MODVER);
3484MODULE_DEPEND(pfsync, pf, PF_MODVER, PF_MODVER, PF_MODVER);
3485#endif /* __FreeBSD__ */
3486