pf_ioctl.c (145836) pf_ioctl.c (147321)
1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 145836 2005-05-03 16:43:32Z mlaier $ */
1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 147321 2005-06-12 16:46:20Z mlaier $ */
2/* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4/*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39#ifdef __FreeBSD__
40#include "opt_inet.h"
41#include "opt_inet6.h"
42#endif
43
44#ifdef __FreeBSD__
45#include "opt_bpf.h"
46#include "opt_pf.h"
47#define NBPFILTER DEV_BPF
48#define NPFLOG DEV_PFLOG
49#define NPFSYNC DEV_PFSYNC
50#else
51#include "bpfilter.h"
52#include "pflog.h"
53#include "pfsync.h"
54#endif
55
56#include <sys/param.h>
57#include <sys/systm.h>
58#include <sys/mbuf.h>
59#include <sys/filio.h>
60#include <sys/fcntl.h>
61#include <sys/socket.h>
62#include <sys/socketvar.h>
63#include <sys/kernel.h>
64#include <sys/time.h>
65#include <sys/malloc.h>
66#ifdef __FreeBSD__
67#include <sys/module.h>
68#include <sys/conf.h>
69#include <sys/proc.h>
70#else
71#include <sys/timeout.h>
72#include <sys/pool.h>
73#endif
74
75#include <net/if.h>
76#include <net/if_types.h>
77#include <net/route.h>
78
79#include <netinet/in.h>
80#include <netinet/in_var.h>
81#include <netinet/in_systm.h>
82#include <netinet/ip.h>
83#include <netinet/ip_var.h>
84#include <netinet/ip_icmp.h>
85
86#ifndef __FreeBSD__
87#include <dev/rndvar.h>
88#endif
89#include <net/pfvar.h>
90
91#if NPFSYNC > 0
92#include <net/if_pfsync.h>
93#endif /* NPFSYNC > 0 */
94
95#ifdef INET6
96#include <netinet/ip6.h>
97#include <netinet/in_pcb.h>
98#endif /* INET6 */
99
100#ifdef ALTQ
101#include <altq/altq.h>
102#endif
103
104#ifdef __FreeBSD__
105#include <sys/limits.h>
106#include <sys/lock.h>
107#include <sys/mutex.h>
108#include <net/pfil.h>
109#endif /* __FreeBSD__ */
110
111#ifdef __FreeBSD__
112void init_zone_var(void);
113void cleanup_pf_zone(void);
114int pfattach(void);
115#else
116void pfattach(int);
117int pfopen(dev_t, int, int, struct proc *);
118int pfclose(dev_t, int, int, struct proc *);
119#endif
120struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
121 u_int8_t, u_int8_t, u_int8_t);
122int pf_get_ruleset_number(u_int8_t);
123void pf_init_ruleset(struct pf_ruleset *);
124int pf_anchor_setup(struct pf_rule *,
125 const struct pf_ruleset *, const char *);
126int pf_anchor_copyout(const struct pf_ruleset *,
127 const struct pf_rule *, struct pfioc_rule *);
128void pf_anchor_remove(struct pf_rule *);
129
130void pf_mv_pool(struct pf_palist *, struct pf_palist *);
131void pf_empty_pool(struct pf_palist *);
132#ifdef __FreeBSD__
133int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
134#else
135int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *);
136#endif
137#ifdef ALTQ
138int pf_begin_altq(u_int32_t *);
139int pf_rollback_altq(u_int32_t);
140int pf_commit_altq(u_int32_t);
141int pf_enable_altq(struct pf_altq *);
142int pf_disable_altq(struct pf_altq *);
143#endif /* ALTQ */
144int pf_begin_rules(u_int32_t *, int, const char *);
145int pf_rollback_rules(u_int32_t, int, char *);
146int pf_commit_rules(u_int32_t, int, char *);
147
148#ifdef __FreeBSD__
149extern struct callout pf_expire_to;
150#else
151extern struct timeout pf_expire_to;
152#endif
153
154struct pf_rule pf_default_rule;
155#ifdef ALTQ
156static int pf_altq_running;
157#endif
158
159#define TAGID_MAX 50000
160TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
161 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
162
163#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
164#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
165#endif
166static u_int16_t tagname2tag(struct pf_tags *, char *);
167static void tag2tagname(struct pf_tags *, u_int16_t, char *);
168static void tag_unref(struct pf_tags *, u_int16_t);
169int pf_rtlabel_add(struct pf_addr_wrap *);
170void pf_rtlabel_remove(struct pf_addr_wrap *);
171void pf_rtlabel_copyout(struct pf_addr_wrap *);
172
173#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
174
175
176#ifdef __FreeBSD__
177static struct cdev *pf_dev;
178
179/*
 180 * XXX - These are new and need to be checked when moving to a new version
181 */
182static void pf_clear_states(void);
183static int pf_clear_tables(void);
184static void pf_clear_srcnodes(void);
185/*
 186 * XXX - These are new and need to be checked when moving to a new version
187 */
188
189/*
190 * Wrapper functions for pfil(9) hooks
191 */
192static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
193 int dir, struct inpcb *inp);
194static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
195 int dir, struct inpcb *inp);
196#ifdef INET6
197static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
198 int dir, struct inpcb *inp);
199static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
200 int dir, struct inpcb *inp);
201#endif
202
203static int hook_pf(void);
204static int dehook_pf(void);
205static int shutdown_pf(void);
206static int pf_load(void);
207static int pf_unload(void);
208
209static struct cdevsw pf_cdevsw = {
210 .d_ioctl = pfioctl,
211 .d_name = PF_NAME,
212 .d_version = D_VERSION,
213};
214
215static volatile int pf_pfil_hooked = 0;
216struct mtx pf_task_mtx;
217
218void
219init_pf_mutex(void)
220{
221 mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
222}
223
224void
225destroy_pf_mutex(void)
226{
227 mtx_destroy(&pf_task_mtx);
228}
229
230void
231init_zone_var(void)
232{
233 pf_src_tree_pl = pf_rule_pl = NULL;
234 pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
235 pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
236 pf_state_scrub_pl = NULL;
237 pfr_ktable_pl = pfr_kentry_pl = NULL;
238}
239
240void
241cleanup_pf_zone(void)
242{
243 UMA_DESTROY(pf_src_tree_pl);
244 UMA_DESTROY(pf_rule_pl);
245 UMA_DESTROY(pf_state_pl);
246 UMA_DESTROY(pf_altq_pl);
247 UMA_DESTROY(pf_pooladdr_pl);
248 UMA_DESTROY(pf_frent_pl);
249 UMA_DESTROY(pf_frag_pl);
250 UMA_DESTROY(pf_cache_pl);
251 UMA_DESTROY(pf_cent_pl);
252 UMA_DESTROY(pfr_ktable_pl);
253 UMA_DESTROY(pfr_kentry_pl);
254 UMA_DESTROY(pf_state_scrub_pl);
255 UMA_DESTROY(pfi_addr_pl);
256}
257
258int
259pfattach(void)
260{
261 u_int32_t *my_timeout = pf_default_rule.timeout;
262 int error = 1;
263
264 do {
265 UMA_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl");
266 UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl");
267 UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl");
268 UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl");
269 UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
270 UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
271 UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
272 UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2");
273 UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent");
274 UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag");
275 UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache");
276 UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent");
277 UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
278 "pfstatescrub");
279 UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
280 error = 0;
281 } while(0);
282 if (error) {
283 cleanup_pf_zone();
284 return (error);
285 }
286 pfr_initialize();
287 pfi_initialize();
288 if ( (error = pf_osfp_initialize()) ) {
289 cleanup_pf_zone();
290 pf_osfp_cleanup();
291 return (error);
292 }
293
294 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
295 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
296 pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl;
297 pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
298 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
299 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
300 uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
301 pf_pool_limits[PF_LIMIT_STATES].limit);
302
303 RB_INIT(&tree_src_tracking);
304 RB_INIT(&pf_anchors);
305 pf_init_ruleset(&pf_main_ruleset);
306 TAILQ_INIT(&pf_altqs[0]);
307 TAILQ_INIT(&pf_altqs[1]);
308 TAILQ_INIT(&pf_pabuf);
309 pf_altqs_active = &pf_altqs[0];
310 pf_altqs_inactive = &pf_altqs[1];
311 TAILQ_INIT(&state_updates);
312
313 /* default rule should never be garbage collected */
314 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
315 pf_default_rule.action = PF_PASS;
316 pf_default_rule.nr = -1;
317
318 /* initialize default timeouts */
319 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
320 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
321 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
322 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
323 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
324 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
325 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
326 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
327 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
328 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
329 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
330 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
331 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
332 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
333 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
334 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
335 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
336 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
337
338 /*
339 * XXX
 340 * The 2nd arg. 0 to callout_init(9) should be set to CALLOUT_MPSAFE
 341 * if Giant lock is removed from the network stack.
342 */
343 callout_init(&pf_expire_to, 0);
338 callout_init(&pf_expire_to, NET_CALLOUT_MPSAFE);
344 callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz,
345 pf_purge_timeout, &pf_expire_to);
346
347 pf_normalize_init();
348 bzero(&pf_status, sizeof(pf_status));
349 pf_pfil_hooked = 0;
350
351 /* XXX do our best to avoid a conflict */
352 pf_status.hostid = arc4random();
353
354 return (error);
355}
356#else /* !__FreeBSD__ */
357void
358pfattach(int num)
359{
360 u_int32_t *timeout = pf_default_rule.timeout;
361
362 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
363 &pool_allocator_nointr);
364 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
365 "pfsrctrpl", NULL);
366 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
367 NULL);
368 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
369 &pool_allocator_nointr);
370 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
371 "pfpooladdrpl", &pool_allocator_nointr);
372 pfr_initialize();
373 pfi_initialize();
374 pf_osfp_initialize();
375
376 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
377 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
378
379 RB_INIT(&tree_src_tracking);
380 RB_INIT(&pf_anchors);
381 pf_init_ruleset(&pf_main_ruleset);
382 TAILQ_INIT(&pf_altqs[0]);
383 TAILQ_INIT(&pf_altqs[1]);
384 TAILQ_INIT(&pf_pabuf);
385 pf_altqs_active = &pf_altqs[0];
386 pf_altqs_inactive = &pf_altqs[1];
387 TAILQ_INIT(&state_updates);
388
389 /* default rule should never be garbage collected */
390 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
391 pf_default_rule.action = PF_PASS;
392 pf_default_rule.nr = -1;
393
394 /* initialize default timeouts */
395 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
396 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
397 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
398 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
399 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
400 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
401 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
402 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
403 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
404 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
405 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
406 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
407 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
408 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
409 timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
410 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
411 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
412 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
413
414 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
415 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
416
417 pf_normalize_init();
418 bzero(&pf_status, sizeof(pf_status));
419 pf_status.debug = PF_DEBUG_URGENT;
420
421 /* XXX do our best to avoid a conflict */
422 pf_status.hostid = arc4random();
423}
424
425int
426pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
427{
428 if (minor(dev) >= 1)
429 return (ENXIO);
430 return (0);
431}
432
433int
434pfclose(struct cdev *dev, int flags, int fmt, struct proc *p)
435{
436 if (minor(dev) >= 1)
437 return (ENXIO);
438 return (0);
439}
440#endif /* __FreeBSD__ */
441
442struct pf_pool *
443pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
444 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
445 u_int8_t check_ticket)
446{
447 struct pf_ruleset *ruleset;
448 struct pf_rule *rule;
449 int rs_num;
450
451 ruleset = pf_find_ruleset(anchor);
452 if (ruleset == NULL)
453 return (NULL);
454 rs_num = pf_get_ruleset_number(rule_action);
455 if (rs_num >= PF_RULESET_MAX)
456 return (NULL);
457 if (active) {
458 if (check_ticket && ticket !=
459 ruleset->rules[rs_num].active.ticket)
460 return (NULL);
461 if (r_last)
462 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
463 pf_rulequeue);
464 else
465 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
466 } else {
467 if (check_ticket && ticket !=
468 ruleset->rules[rs_num].inactive.ticket)
469 return (NULL);
470 if (r_last)
471 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
472 pf_rulequeue);
473 else
474 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
475 }
476 if (!r_last) {
477 while ((rule != NULL) && (rule->nr != rule_number))
478 rule = TAILQ_NEXT(rule, entries);
479 }
480 if (rule == NULL)
481 return (NULL);
482
483 return (&rule->rpool);
484}
485
486int
487pf_get_ruleset_number(u_int8_t action)
488{
489 switch (action) {
490 case PF_SCRUB:
491 case PF_NOSCRUB:
492 return (PF_RULESET_SCRUB);
493 break;
494 case PF_PASS:
495 case PF_DROP:
496 return (PF_RULESET_FILTER);
497 break;
498 case PF_NAT:
499 case PF_NONAT:
500 return (PF_RULESET_NAT);
501 break;
502 case PF_BINAT:
503 case PF_NOBINAT:
504 return (PF_RULESET_BINAT);
505 break;
506 case PF_RDR:
507 case PF_NORDR:
508 return (PF_RULESET_RDR);
509 break;
510 default:
511 return (PF_RULESET_MAX);
512 break;
513 }
514}
515
516void
517pf_init_ruleset(struct pf_ruleset *ruleset)
518{
519 int i;
520
521 memset(ruleset, 0, sizeof(struct pf_ruleset));
522 for (i = 0; i < PF_RULESET_MAX; i++) {
523 TAILQ_INIT(&ruleset->rules[i].queues[0]);
524 TAILQ_INIT(&ruleset->rules[i].queues[1]);
525 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
526 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
527 }
528}
529
530struct pf_anchor *
531pf_find_anchor(const char *path)
532{
533 static struct pf_anchor key;
534
535 memset(&key, 0, sizeof(key));
536 strlcpy(key.path, path, sizeof(key.path));
537 return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
538}
539
540struct pf_ruleset *
541pf_find_ruleset(const char *path)
542{
543 struct pf_anchor *anchor;
544
545 while (*path == '/')
546 path++;
547 if (!*path)
548 return (&pf_main_ruleset);
549 anchor = pf_find_anchor(path);
550 if (anchor == NULL)
551 return (NULL);
552 else
553 return (&anchor->ruleset);
554}
555
556struct pf_ruleset *
557pf_find_or_create_ruleset(const char *path)
558{
559 static char p[MAXPATHLEN];
560 char *q = NULL, *r; /* make the compiler happy */
561 struct pf_ruleset *ruleset;
562 struct pf_anchor *anchor = NULL, *dup, *parent = NULL;
563
564 while (*path == '/')
565 path++;
566 ruleset = pf_find_ruleset(path);
567 if (ruleset != NULL)
568 return (ruleset);
569 strlcpy(p, path, sizeof(p));
570#ifdef __FreeBSD__
571 while (parent == NULL && (q = rindex(p, '/')) != NULL) {
572#else
573 while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
574#endif
575 *q = 0;
576 if ((ruleset = pf_find_ruleset(p)) != NULL) {
577 parent = ruleset->anchor;
578 break;
579 }
580 }
581 if (q == NULL)
582 q = p;
583 else
584 q++;
585 strlcpy(p, path, sizeof(p));
586 if (!*q)
587 return (NULL);
588#ifdef __FreeBSD__
589 while ((r = index(q, '/')) != NULL || *q) {
590#else
591 while ((r = strchr(q, '/')) != NULL || *q) {
592#endif
593 if (r != NULL)
594 *r = 0;
595 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
596 (parent != NULL && strlen(parent->path) >=
597 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
598 return (NULL);
599 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
600 M_NOWAIT);
601 if (anchor == NULL)
602 return (NULL);
603 memset(anchor, 0, sizeof(*anchor));
604 RB_INIT(&anchor->children);
605 strlcpy(anchor->name, q, sizeof(anchor->name));
606 if (parent != NULL) {
607 strlcpy(anchor->path, parent->path,
608 sizeof(anchor->path));
609 strlcat(anchor->path, "/", sizeof(anchor->path));
610 }
611 strlcat(anchor->path, anchor->name, sizeof(anchor->path));
612 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
613 NULL) {
614 printf("pf_find_or_create_ruleset: RB_INSERT1 "
615 "'%s' '%s' collides with '%s' '%s'\n",
616 anchor->path, anchor->name, dup->path, dup->name);
617 free(anchor, M_TEMP);
618 return (NULL);
619 }
620 if (parent != NULL) {
621 anchor->parent = parent;
622 if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
623 anchor)) != NULL) {
624 printf("pf_find_or_create_ruleset: "
625 "RB_INSERT2 '%s' '%s' collides with "
626 "'%s' '%s'\n", anchor->path, anchor->name,
627 dup->path, dup->name);
628 RB_REMOVE(pf_anchor_global, &pf_anchors,
629 anchor);
630 free(anchor, M_TEMP);
631 return (NULL);
632 }
633 }
634 pf_init_ruleset(&anchor->ruleset);
635 anchor->ruleset.anchor = anchor;
636 parent = anchor;
637 if (r != NULL)
638 q = r + 1;
639 else
640 *q = 0;
641 }
642 return (&anchor->ruleset);
643}
644
645void
646pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
647{
648 struct pf_anchor *parent;
649 int i;
650
651 while (ruleset != NULL) {
652 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
653 !RB_EMPTY(&ruleset->anchor->children) ||
654 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
655 ruleset->topen)
656 return;
657 for (i = 0; i < PF_RULESET_MAX; ++i)
658 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
659 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
660 ruleset->rules[i].inactive.open)
661 return;
662 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
663 if ((parent = ruleset->anchor->parent) != NULL)
664 RB_REMOVE(pf_anchor_node, &parent->children,
665 ruleset->anchor);
666 free(ruleset->anchor, M_TEMP);
667 if (parent == NULL)
668 return;
669 ruleset = &parent->ruleset;
670 }
671}
672
673int
674pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
675 const char *name)
676{
677 static char *p, path[MAXPATHLEN];
678 struct pf_ruleset *ruleset;
679
680 r->anchor = NULL;
681 r->anchor_relative = 0;
682 r->anchor_wildcard = 0;
683 if (!name[0])
684 return (0);
685 if (name[0] == '/')
686 strlcpy(path, name + 1, sizeof(path));
687 else {
688 /* relative path */
689 r->anchor_relative = 1;
690 if (s->anchor == NULL || !s->anchor->path[0])
691 path[0] = 0;
692 else
693 strlcpy(path, s->anchor->path, sizeof(path));
694 while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
695 if (!path[0]) {
696 printf("pf_anchor_setup: .. beyond root\n");
697 return (1);
698 }
699#ifdef __FreeBSD__
700 if ((p = rindex(path, '/')) != NULL)
701#else
702 if ((p = strrchr(path, '/')) != NULL)
703#endif
704 *p = 0;
705 else
706 path[0] = 0;
707 r->anchor_relative++;
708 name += 3;
709 }
710 if (path[0])
711 strlcat(path, "/", sizeof(path));
712 strlcat(path, name, sizeof(path));
713 }
714#ifdef __FreeBSD__
715 if ((p = rindex(path, '/')) != NULL && !strcmp(p, "/*")) {
716#else
717 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
718#endif
719 r->anchor_wildcard = 1;
720 *p = 0;
721 }
722 ruleset = pf_find_or_create_ruleset(path);
723 if (ruleset == NULL || ruleset->anchor == NULL) {
724 printf("pf_anchor_setup: ruleset\n");
725 return (1);
726 }
727 r->anchor = ruleset->anchor;
728 r->anchor->refcnt++;
729 return (0);
730}
731
732int
733pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
734 struct pfioc_rule *pr)
735{
736 pr->anchor_call[0] = 0;
737 if (r->anchor == NULL)
738 return (0);
739 if (!r->anchor_relative) {
740 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
741 strlcat(pr->anchor_call, r->anchor->path,
742 sizeof(pr->anchor_call));
743 } else {
744 char a[MAXPATHLEN], b[MAXPATHLEN], *p;
745 int i;
746
747 if (rs->anchor == NULL)
748 a[0] = 0;
749 else
750 strlcpy(a, rs->anchor->path, sizeof(a));
751 strlcpy(b, r->anchor->path, sizeof(b));
752 for (i = 1; i < r->anchor_relative; ++i) {
753#ifdef __FreeBSD__
754 if ((p = rindex(a, '/')) == NULL)
755#else
756 if ((p = strrchr(a, '/')) == NULL)
757#endif
758 p = a;
759 *p = 0;
760 strlcat(pr->anchor_call, "../",
761 sizeof(pr->anchor_call));
762 }
763 if (strncmp(a, b, strlen(a))) {
764 printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
765 return (1);
766 }
767 if (strlen(b) > strlen(a))
768 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
769 sizeof(pr->anchor_call));
770 }
771 if (r->anchor_wildcard)
772 strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
773 sizeof(pr->anchor_call));
774 return (0);
775}
776
777void
778pf_anchor_remove(struct pf_rule *r)
779{
780 if (r->anchor == NULL)
781 return;
782 if (r->anchor->refcnt <= 0) {
783 printf("pf_anchor_remove: broken refcount");
784 r->anchor = NULL;
785 return;
786 }
787 if (!--r->anchor->refcnt)
788 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
789 r->anchor = NULL;
790}
791
792void
793pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
794{
795 struct pf_pooladdr *mv_pool_pa;
796
797 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
798 TAILQ_REMOVE(poola, mv_pool_pa, entries);
799 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
800 }
801}
802
803void
804pf_empty_pool(struct pf_palist *poola)
805{
806 struct pf_pooladdr *empty_pool_pa;
807
808 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
809 pfi_dynaddr_remove(&empty_pool_pa->addr);
810 pf_tbladdr_remove(&empty_pool_pa->addr);
811 pfi_detach_rule(empty_pool_pa->kif);
812 TAILQ_REMOVE(poola, empty_pool_pa, entries);
813 pool_put(&pf_pooladdr_pl, empty_pool_pa);
814 }
815}
816
817void
818pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
819{
820 if (rulequeue != NULL) {
821 if (rule->states <= 0) {
822 /*
823 * XXX - we need to remove the table *before* detaching
824 * the rule to make sure the table code does not delete
825 * the anchor under our feet.
826 */
827 pf_tbladdr_remove(&rule->src.addr);
828 pf_tbladdr_remove(&rule->dst.addr);
829 if (rule->overload_tbl)
830 pfr_detach_table(rule->overload_tbl);
831 }
832 TAILQ_REMOVE(rulequeue, rule, entries);
833 rule->entries.tqe_prev = NULL;
834 rule->nr = -1;
835 }
836
837 if (rule->states > 0 || rule->src_nodes > 0 ||
838 rule->entries.tqe_prev != NULL)
839 return;
840 pf_tag_unref(rule->tag);
841 pf_tag_unref(rule->match_tag);
842#ifdef ALTQ
843 if (rule->pqid != rule->qid)
844 pf_qid_unref(rule->pqid);
845 pf_qid_unref(rule->qid);
846#endif
847 pf_rtlabel_remove(&rule->src.addr);
848 pf_rtlabel_remove(&rule->dst.addr);
849 pfi_dynaddr_remove(&rule->src.addr);
850 pfi_dynaddr_remove(&rule->dst.addr);
851 if (rulequeue == NULL) {
852 pf_tbladdr_remove(&rule->src.addr);
853 pf_tbladdr_remove(&rule->dst.addr);
854 if (rule->overload_tbl)
855 pfr_detach_table(rule->overload_tbl);
856 }
857 pfi_detach_rule(rule->kif);
858 pf_anchor_remove(rule);
859 pf_empty_pool(&rule->rpool.list);
860 pool_put(&pf_rule_pl, rule);
861}
862
863static u_int16_t
864tagname2tag(struct pf_tags *head, char *tagname)
865{
866 struct pf_tagname *tag, *p = NULL;
867 u_int16_t new_tagid = 1;
868
869 TAILQ_FOREACH(tag, head, entries)
870 if (strcmp(tagname, tag->name) == 0) {
871 tag->ref++;
872 return (tag->tag);
873 }
874
875 /*
876 * to avoid fragmentation, we do a linear search from the beginning
877 * and take the first free slot we find. if there is none or the list
878 * is empty, append a new entry at the end.
879 */
880
881 /* new entry */
882 if (!TAILQ_EMPTY(head))
883 for (p = TAILQ_FIRST(head); p != NULL &&
884 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
885 new_tagid = p->tag + 1;
886
887 if (new_tagid > TAGID_MAX)
888 return (0);
889
890 /* allocate and fill new struct pf_tagname */
891 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
892 M_TEMP, M_NOWAIT);
893 if (tag == NULL)
894 return (0);
895 bzero(tag, sizeof(struct pf_tagname));
896 strlcpy(tag->name, tagname, sizeof(tag->name));
897 tag->tag = new_tagid;
898 tag->ref++;
899
900 if (p != NULL) /* insert new entry before p */
901 TAILQ_INSERT_BEFORE(p, tag, entries);
902 else /* either list empty or no free slot in between */
903 TAILQ_INSERT_TAIL(head, tag, entries);
904
905 return (tag->tag);
906}
907
908static void
909tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
910{
911 struct pf_tagname *tag;
912
913 TAILQ_FOREACH(tag, head, entries)
914 if (tag->tag == tagid) {
915 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
916 return;
917 }
918}
919
920static void
921tag_unref(struct pf_tags *head, u_int16_t tag)
922{
923 struct pf_tagname *p, *next;
924
925 if (tag == 0)
926 return;
927
928 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
929 next = TAILQ_NEXT(p, entries);
930 if (tag == p->tag) {
931 if (--p->ref == 0) {
932 TAILQ_REMOVE(head, p, entries);
933 free(p, M_TEMP);
934 }
935 break;
936 }
937 }
938}
939
940u_int16_t
941pf_tagname2tag(char *tagname)
942{
943 return (tagname2tag(&pf_tags, tagname));
944}
945
946void
947pf_tag2tagname(u_int16_t tagid, char *p)
948{
949 return (tag2tagname(&pf_tags, tagid, p));
950}
951
952void
953pf_tag_ref(u_int16_t tag)
954{
955 struct pf_tagname *t;
956
957 TAILQ_FOREACH(t, &pf_tags, entries)
958 if (t->tag == tag)
959 break;
960 if (t != NULL)
961 t->ref++;
962}
963
964void
965pf_tag_unref(u_int16_t tag)
966{
967 return (tag_unref(&pf_tags, tag));
968}
969
970int
971pf_rtlabel_add(struct pf_addr_wrap *a)
972{
973#ifdef __FreeBSD__
974 /* XXX_IMPORT: later */
975 return (0);
976#else
977 if (a->type == PF_ADDR_RTLABEL &&
978 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
979 return (-1);
980 return (0);
981#endif
982}
983
984void
985pf_rtlabel_remove(struct pf_addr_wrap *a)
986{
987#ifdef __FreeBSD__
988 /* XXX_IMPORT: later */
989#else
990 if (a->type == PF_ADDR_RTLABEL)
991 rtlabel_unref(a->v.rtlabel);
992#endif
993}
994
995void
996pf_rtlabel_copyout(struct pf_addr_wrap *a)
997{
998#ifdef __FreeBSD__
999 /* XXX_IMPORT: later */
1000 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
1001 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
1002#else
1003 const char *name;
1004
1005 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
1006 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
1007 strlcpy(a->v.rtlabelname, "?",
1008 sizeof(a->v.rtlabelname));
1009 else
1010 strlcpy(a->v.rtlabelname, name,
1011 sizeof(a->v.rtlabelname));
1012 }
1013#endif
1014}
1015
1016#ifdef ALTQ
1017u_int32_t
1018pf_qname2qid(char *qname)
1019{
1020 return ((u_int32_t)tagname2tag(&pf_qids, qname));
1021}
1022
1023void
1024pf_qid2qname(u_int32_t qid, char *p)
1025{
1026 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
1027}
1028
1029void
1030pf_qid_unref(u_int32_t qid)
1031{
1032 return (tag_unref(&pf_qids, (u_int16_t)qid));
1033}
1034
1035int
1036pf_begin_altq(u_int32_t *ticket)
1037{
1038 struct pf_altq *altq;
1039 int error = 0;
1040
1041 /* Purge the old altq list */
1042 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1043 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1044 if (altq->qname[0] == 0) {
1045 /* detach and destroy the discipline */
1046 error = altq_remove(altq);
1047 } else
1048 pf_qid_unref(altq->qid);
1049 pool_put(&pf_altq_pl, altq);
1050 }
1051 if (error)
1052 return (error);
1053 *ticket = ++ticket_altqs_inactive;
1054 altqs_inactive_open = 1;
1055 return (0);
1056}
1057
1058int
1059pf_rollback_altq(u_int32_t ticket)
1060{
1061 struct pf_altq *altq;
1062 int error = 0;
1063
1064 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
1065 return (0);
1066 /* Purge the old altq list */
1067 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1068 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1069 if (altq->qname[0] == 0) {
1070 /* detach and destroy the discipline */
1071 error = altq_remove(altq);
1072 } else
1073 pf_qid_unref(altq->qid);
1074 pool_put(&pf_altq_pl, altq);
1075 }
1076 altqs_inactive_open = 0;
1077 return (error);
1078}
1079
1080int
1081pf_commit_altq(u_int32_t ticket)
1082{
1083 struct pf_altqqueue *old_altqs;
1084 struct pf_altq *altq;
1085 int s, err, error = 0;
1086
1087 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
1088 return (EBUSY);
1089
1090 /* swap altqs, keep the old. */
1091 s = splsoftnet();
1092 old_altqs = pf_altqs_active;
1093 pf_altqs_active = pf_altqs_inactive;
1094 pf_altqs_inactive = old_altqs;
1095 ticket_altqs_active = ticket_altqs_inactive;
1096
1097 /* Attach new disciplines */
1098 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1099 if (altq->qname[0] == 0) {
1100 /* attach the discipline */
1101 error = altq_pfattach(altq);
1102 if (error == 0 && pf_altq_running)
1103 error = pf_enable_altq(altq);
1104 if (error != 0) {
1105 splx(s);
1106 return (error);
1107 }
1108 }
1109 }
1110
1111 /* Purge the old altq list */
1112 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1113 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1114 if (altq->qname[0] == 0) {
1115 /* detach and destroy the discipline */
1116 if (pf_altq_running)
1117 error = pf_disable_altq(altq);
1118 err = altq_pfdetach(altq);
1119 if (err != 0 && error == 0)
1120 error = err;
1121 err = altq_remove(altq);
1122 if (err != 0 && error == 0)
1123 error = err;
1124 } else
1125 pf_qid_unref(altq->qid);
1126 pool_put(&pf_altq_pl, altq);
1127 }
1128 splx(s);
1129
1130 altqs_inactive_open = 0;
1131 return (error);
1132}
1133
1134int
1135pf_enable_altq(struct pf_altq *altq)
1136{
1137 struct ifnet *ifp;
1138 struct tb_profile tb;
1139 int s, error = 0;
1140
1141 if ((ifp = ifunit(altq->ifname)) == NULL)
1142 return (EINVAL);
1143
1144 if (ifp->if_snd.altq_type != ALTQT_NONE)
1145 error = altq_enable(&ifp->if_snd);
1146
1147 /* set tokenbucket regulator */
1148 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1149 tb.rate = altq->ifbandwidth;
1150 tb.depth = altq->tbrsize;
1151 s = splimp();
1152#ifdef __FreeBSD__
1153 PF_UNLOCK();
1154#endif
1155 error = tbr_set(&ifp->if_snd, &tb);
1156#ifdef __FreeBSD__
1157 PF_LOCK();
1158#endif
1159 splx(s);
1160 }
1161
1162 return (error);
1163}
1164
1165int
1166pf_disable_altq(struct pf_altq *altq)
1167{
1168 struct ifnet *ifp;
1169 struct tb_profile tb;
1170 int s, error;
1171
1172 if ((ifp = ifunit(altq->ifname)) == NULL)
1173 return (EINVAL);
1174
1175 /*
1176 * when the discipline is no longer referenced, it was overridden
1177 * by a new one. if so, just return.
1178 */
1179 if (altq->altq_disc != ifp->if_snd.altq_disc)
1180 return (0);
1181
1182 error = altq_disable(&ifp->if_snd);
1183
1184 if (error == 0) {
1185 /* clear tokenbucket regulator */
1186 tb.rate = 0;
1187 s = splimp();
1188#ifdef __FreeBSD__
1189 PF_UNLOCK();
1190#endif
1191 error = tbr_set(&ifp->if_snd, &tb);
1192#ifdef __FreeBSD__
1193 PF_LOCK();
1194#endif
1195 splx(s);
1196 }
1197
1198 return (error);
1199}
1200#endif /* ALTQ */
1201
1202int
1203pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1204{
1205 struct pf_ruleset *rs;
1206 struct pf_rule *rule;
1207
1208 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1209 return (EINVAL);
1210 rs = pf_find_or_create_ruleset(anchor);
1211 if (rs == NULL)
1212 return (EINVAL);
1213 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1214 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1215 *ticket = ++rs->rules[rs_num].inactive.ticket;
1216 rs->rules[rs_num].inactive.open = 1;
1217 return (0);
1218}
1219
1220int
1221pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1222{
1223 struct pf_ruleset *rs;
1224 struct pf_rule *rule;
1225
1226 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1227 return (EINVAL);
1228 rs = pf_find_ruleset(anchor);
1229 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1230 rs->rules[rs_num].inactive.ticket != ticket)
1231 return (0);
1232 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1233 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1234 rs->rules[rs_num].inactive.open = 0;
1235 return (0);
1236}
1237
1238int
1239pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1240{
1241 struct pf_ruleset *rs;
1242 struct pf_rule *rule;
1243 struct pf_rulequeue *old_rules;
1244 int s;
1245
1246 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1247 return (EINVAL);
1248 rs = pf_find_ruleset(anchor);
1249 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1250 ticket != rs->rules[rs_num].inactive.ticket)
1251 return (EBUSY);
1252
1253 /* Swap rules, keep the old. */
1254 s = splsoftnet();
1255 old_rules = rs->rules[rs_num].active.ptr;
1256 rs->rules[rs_num].active.ptr =
1257 rs->rules[rs_num].inactive.ptr;
1258 rs->rules[rs_num].inactive.ptr = old_rules;
1259 rs->rules[rs_num].active.ticket =
1260 rs->rules[rs_num].inactive.ticket;
1261 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1262
1263 /* Purge the old rule list. */
1264 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1265 pf_rm_rule(old_rules, rule);
1266 rs->rules[rs_num].inactive.open = 0;
1267 pf_remove_if_empty_ruleset(rs);
1268 splx(s);
1269 return (0);
1270}
1271
1272#ifdef __FreeBSD__
1273int
1274pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1275#else
1276int
1277pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1278#endif
1279{
1280 struct pf_pooladdr *pa = NULL;
1281 struct pf_pool *pool = NULL;
1282#ifndef __FreeBSD__
1283 int s;
1284#endif
1285 int error = 0;
1286
1287 /* XXX keep in sync with switch() below */
1288#ifdef __FreeBSD__
1289 if (securelevel_gt(td->td_ucred, 2))
1290#else
1291 if (securelevel > 1)
1292#endif
1293 switch (cmd) {
1294 case DIOCGETRULES:
1295 case DIOCGETRULE:
1296 case DIOCGETADDRS:
1297 case DIOCGETADDR:
1298 case DIOCGETSTATE:
1299 case DIOCSETSTATUSIF:
1300 case DIOCGETSTATUS:
1301 case DIOCCLRSTATUS:
1302 case DIOCNATLOOK:
1303 case DIOCSETDEBUG:
1304 case DIOCGETSTATES:
1305 case DIOCGETTIMEOUT:
1306 case DIOCCLRRULECTRS:
1307 case DIOCGETLIMIT:
1308 case DIOCGETALTQS:
1309 case DIOCGETALTQ:
1310 case DIOCGETQSTATS:
1311 case DIOCGETRULESETS:
1312 case DIOCGETRULESET:
1313 case DIOCRGETTABLES:
1314 case DIOCRGETTSTATS:
1315 case DIOCRCLRTSTATS:
1316 case DIOCRCLRADDRS:
1317 case DIOCRADDADDRS:
1318 case DIOCRDELADDRS:
1319 case DIOCRSETADDRS:
1320 case DIOCRGETADDRS:
1321 case DIOCRGETASTATS:
1322 case DIOCRCLRASTATS:
1323 case DIOCRTSTADDRS:
1324 case DIOCOSFPGET:
1325 case DIOCGETSRCNODES:
1326 case DIOCCLRSRCNODES:
1327 case DIOCIGETIFACES:
1328 case DIOCICLRISTATS:
1329#ifdef __FreeBSD__
1330 case DIOCGIFSPEED:
1331#endif
1332 case DIOCSETIFFLAG:
1333 case DIOCCLRIFFLAG:
1334 break;
1335 case DIOCRCLRTABLES:
1336 case DIOCRADDTABLES:
1337 case DIOCRDELTABLES:
1338 case DIOCRSETTFLAGS:
1339 if (((struct pfioc_table *)addr)->pfrio_flags &
1340 PFR_FLAG_DUMMY)
1341 break; /* dummy operation ok */
1342 return (EPERM);
1343 default:
1344 return (EPERM);
1345 }
1346
1347 if (!(flags & FWRITE))
1348 switch (cmd) {
1349 case DIOCGETRULES:
1350 case DIOCGETRULE:
1351 case DIOCGETADDRS:
1352 case DIOCGETADDR:
1353 case DIOCGETSTATE:
1354 case DIOCGETSTATUS:
1355 case DIOCGETSTATES:
1356 case DIOCGETTIMEOUT:
1357 case DIOCGETLIMIT:
1358 case DIOCGETALTQS:
1359 case DIOCGETALTQ:
1360 case DIOCGETQSTATS:
1361 case DIOCGETRULESETS:
1362 case DIOCGETRULESET:
1363 case DIOCRGETTABLES:
1364 case DIOCRGETTSTATS:
1365 case DIOCRGETADDRS:
1366 case DIOCRGETASTATS:
1367 case DIOCRTSTADDRS:
1368 case DIOCOSFPGET:
1369 case DIOCGETSRCNODES:
1370 case DIOCIGETIFACES:
1371#ifdef __FreeBSD__
1372 case DIOCGIFSPEED:
1373#endif
1374 break;
1375 case DIOCRCLRTABLES:
1376 case DIOCRADDTABLES:
1377 case DIOCRDELTABLES:
1378 case DIOCRCLRTSTATS:
1379 case DIOCRCLRADDRS:
1380 case DIOCRADDADDRS:
1381 case DIOCRDELADDRS:
1382 case DIOCRSETADDRS:
1383 case DIOCRSETTFLAGS:
1384 if (((struct pfioc_table *)addr)->pfrio_flags &
1385 PFR_FLAG_DUMMY)
1386 break; /* dummy operation ok */
1387 return (EACCES);
1388 default:
1389 return (EACCES);
1390 }
1391
1392#ifdef __FreeBSD__
1393 PF_LOCK();
1394#else
1395 s = splsoftnet();
1396#endif
1397 switch (cmd) {
1398
1399 case DIOCSTART:
1400 if (pf_status.running)
1401 error = EEXIST;
1402 else {
1403#ifdef __FreeBSD__
1404 PF_UNLOCK();
1405 error = hook_pf();
1406 PF_LOCK();
1407 if (error) {
1408 DPFPRINTF(PF_DEBUG_MISC,
 1409 ("pf: pfil registration failed\n"));
1410 break;
1411 }
1412#endif
1413 pf_status.running = 1;
1414 pf_status.since = time_second;
1415 if (pf_status.stateid == 0) {
1416 pf_status.stateid = time_second;
1417 pf_status.stateid = pf_status.stateid << 32;
1418 }
1419 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1420 }
1421 break;
1422
1423 case DIOCSTOP:
1424 if (!pf_status.running)
1425 error = ENOENT;
1426 else {
1427 pf_status.running = 0;
1428#ifdef __FreeBSD__
1429 PF_UNLOCK();
1430 error = dehook_pf();
1431 PF_LOCK();
1432 if (error) {
1433 pf_status.running = 1;
1434 DPFPRINTF(PF_DEBUG_MISC,
 1435 ("pf: pfil unregistration failed\n"));
1436 }
1437#endif
1438 pf_status.since = time_second;
1439 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1440 }
1441 break;
1442
1443 case DIOCADDRULE: {
1444 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1445 struct pf_ruleset *ruleset;
1446 struct pf_rule *rule, *tail;
1447 struct pf_pooladdr *pa;
1448 int rs_num;
1449
1450 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1451 ruleset = pf_find_ruleset(pr->anchor);
1452 if (ruleset == NULL) {
1453 error = EINVAL;
1454 break;
1455 }
1456 rs_num = pf_get_ruleset_number(pr->rule.action);
1457 if (rs_num >= PF_RULESET_MAX) {
1458 error = EINVAL;
1459 break;
1460 }
1461 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1462 error = EINVAL;
1463 break;
1464 }
1465 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1466 printf("ticket: %d != [%d]%d\n", pr->ticket,
1467 rs_num, ruleset->rules[rs_num].inactive.ticket);
1468 error = EBUSY;
1469 break;
1470 }
1471 if (pr->pool_ticket != ticket_pabuf) {
1472 printf("pool_ticket: %d != %d\n", pr->pool_ticket,
1473 ticket_pabuf);
1474 error = EBUSY;
1475 break;
1476 }
1477 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1478 if (rule == NULL) {
1479 error = ENOMEM;
1480 break;
1481 }
1482 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1483 rule->anchor = NULL;
1484 rule->kif = NULL;
1485 TAILQ_INIT(&rule->rpool.list);
1486 /* initialize refcounting */
1487 rule->states = 0;
1488 rule->src_nodes = 0;
1489 rule->entries.tqe_prev = NULL;
1490#ifndef INET
1491 if (rule->af == AF_INET) {
1492 pool_put(&pf_rule_pl, rule);
1493 error = EAFNOSUPPORT;
1494 break;
1495 }
1496#endif /* INET */
1497#ifndef INET6
1498 if (rule->af == AF_INET6) {
1499 pool_put(&pf_rule_pl, rule);
1500 error = EAFNOSUPPORT;
1501 break;
1502 }
1503#endif /* INET6 */
1504 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1505 pf_rulequeue);
1506 if (tail)
1507 rule->nr = tail->nr + 1;
1508 else
1509 rule->nr = 0;
1510 if (rule->ifname[0]) {
1511 rule->kif = pfi_attach_rule(rule->ifname);
1512 if (rule->kif == NULL) {
1513 pool_put(&pf_rule_pl, rule);
1514 error = EINVAL;
1515 break;
1516 }
1517 }
1518
1519#ifdef ALTQ
1520 /* set queue IDs */
1521 if (rule->qname[0] != 0) {
1522 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1523 error = EBUSY;
1524 else if (rule->pqname[0] != 0) {
1525 if ((rule->pqid =
1526 pf_qname2qid(rule->pqname)) == 0)
1527 error = EBUSY;
1528 } else
1529 rule->pqid = rule->qid;
1530 }
1531#endif
1532 if (rule->tagname[0])
1533 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1534 error = EBUSY;
1535 if (rule->match_tagname[0])
1536 if ((rule->match_tag =
1537 pf_tagname2tag(rule->match_tagname)) == 0)
1538 error = EBUSY;
1539 if (rule->rt && !rule->direction)
1540 error = EINVAL;
1541 if (pf_rtlabel_add(&rule->src.addr) ||
1542 pf_rtlabel_add(&rule->dst.addr))
1543 error = EBUSY;
1544 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1545 error = EINVAL;
1546 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1547 error = EINVAL;
1548 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1549 error = EINVAL;
1550 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1551 error = EINVAL;
1552 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1553 error = EINVAL;
1554 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1555 if (pf_tbladdr_setup(ruleset, &pa->addr))
1556 error = EINVAL;
1557
1558 if (rule->overload_tblname[0]) {
1559 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1560 rule->overload_tblname)) == NULL)
1561 error = EINVAL;
1562 else
1563 rule->overload_tbl->pfrkt_flags |=
1564 PFR_TFLAG_ACTIVE;
1565 }
1566
1567 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1568 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1569 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1570 (rule->rt > PF_FASTROUTE)) &&
1571 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1572 error = EINVAL;
1573
1574 if (error) {
1575 pf_rm_rule(NULL, rule);
1576 break;
1577 }
1578 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1579 rule->evaluations = rule->packets = rule->bytes = 0;
1580 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1581 rule, entries);
1582 break;
1583 }
1584
1585 case DIOCGETRULES: {
1586 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1587 struct pf_ruleset *ruleset;
1588 struct pf_rule *tail;
1589 int rs_num;
1590
1591 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1592 ruleset = pf_find_ruleset(pr->anchor);
1593 if (ruleset == NULL) {
1594 error = EINVAL;
1595 break;
1596 }
1597 rs_num = pf_get_ruleset_number(pr->rule.action);
1598 if (rs_num >= PF_RULESET_MAX) {
1599 error = EINVAL;
1600 break;
1601 }
1602 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1603 pf_rulequeue);
1604 if (tail)
1605 pr->nr = tail->nr + 1;
1606 else
1607 pr->nr = 0;
1608 pr->ticket = ruleset->rules[rs_num].active.ticket;
1609 break;
1610 }
1611
1612 case DIOCGETRULE: {
1613 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1614 struct pf_ruleset *ruleset;
1615 struct pf_rule *rule;
1616 int rs_num, i;
1617
1618 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1619 ruleset = pf_find_ruleset(pr->anchor);
1620 if (ruleset == NULL) {
1621 error = EINVAL;
1622 break;
1623 }
1624 rs_num = pf_get_ruleset_number(pr->rule.action);
1625 if (rs_num >= PF_RULESET_MAX) {
1626 error = EINVAL;
1627 break;
1628 }
1629 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1630 error = EBUSY;
1631 break;
1632 }
1633 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1634 while ((rule != NULL) && (rule->nr != pr->nr))
1635 rule = TAILQ_NEXT(rule, entries);
1636 if (rule == NULL) {
1637 error = EBUSY;
1638 break;
1639 }
1640 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1641 if (pf_anchor_copyout(ruleset, rule, pr)) {
1642 error = EBUSY;
1643 break;
1644 }
1645 pfi_dynaddr_copyout(&pr->rule.src.addr);
1646 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1647 pf_tbladdr_copyout(&pr->rule.src.addr);
1648 pf_tbladdr_copyout(&pr->rule.dst.addr);
1649 pf_rtlabel_copyout(&pr->rule.src.addr);
1650 pf_rtlabel_copyout(&pr->rule.dst.addr);
1651 for (i = 0; i < PF_SKIP_COUNT; ++i)
1652 if (rule->skip[i].ptr == NULL)
1653 pr->rule.skip[i].nr = -1;
1654 else
1655 pr->rule.skip[i].nr =
1656 rule->skip[i].ptr->nr;
1657 break;
1658 }
1659
1660 case DIOCCHANGERULE: {
1661 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1662 struct pf_ruleset *ruleset;
1663 struct pf_rule *oldrule = NULL, *newrule = NULL;
1664 u_int32_t nr = 0;
1665 int rs_num;
1666
1667 if (!(pcr->action == PF_CHANGE_REMOVE ||
1668 pcr->action == PF_CHANGE_GET_TICKET) &&
1669 pcr->pool_ticket != ticket_pabuf) {
1670 error = EBUSY;
1671 break;
1672 }
1673
1674 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1675 pcr->action > PF_CHANGE_GET_TICKET) {
1676 error = EINVAL;
1677 break;
1678 }
1679 ruleset = pf_find_ruleset(pcr->anchor);
1680 if (ruleset == NULL) {
1681 error = EINVAL;
1682 break;
1683 }
1684 rs_num = pf_get_ruleset_number(pcr->rule.action);
1685 if (rs_num >= PF_RULESET_MAX) {
1686 error = EINVAL;
1687 break;
1688 }
1689
1690 if (pcr->action == PF_CHANGE_GET_TICKET) {
1691 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1692 break;
1693 } else {
1694 if (pcr->ticket !=
1695 ruleset->rules[rs_num].active.ticket) {
1696 error = EINVAL;
1697 break;
1698 }
1699 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1700 error = EINVAL;
1701 break;
1702 }
1703 }
1704
1705 if (pcr->action != PF_CHANGE_REMOVE) {
1706 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1707 if (newrule == NULL) {
1708 error = ENOMEM;
1709 break;
1710 }
1711 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1712 TAILQ_INIT(&newrule->rpool.list);
1713 /* initialize refcounting */
1714 newrule->states = 0;
1715 newrule->entries.tqe_prev = NULL;
1716#ifndef INET
1717 if (newrule->af == AF_INET) {
1718 pool_put(&pf_rule_pl, newrule);
1719 error = EAFNOSUPPORT;
1720 break;
1721 }
1722#endif /* INET */
1723#ifndef INET6
1724 if (newrule->af == AF_INET6) {
1725 pool_put(&pf_rule_pl, newrule);
1726 error = EAFNOSUPPORT;
1727 break;
1728 }
1729#endif /* INET6 */
1730 if (newrule->ifname[0]) {
1731 newrule->kif = pfi_attach_rule(newrule->ifname);
1732 if (newrule->kif == NULL) {
1733 pool_put(&pf_rule_pl, newrule);
1734 error = EINVAL;
1735 break;
1736 }
1737 } else
1738 newrule->kif = NULL;
1739
1740#ifdef ALTQ
1741 /* set queue IDs */
1742 if (newrule->qname[0] != 0) {
1743 if ((newrule->qid =
1744 pf_qname2qid(newrule->qname)) == 0)
1745 error = EBUSY;
1746 else if (newrule->pqname[0] != 0) {
1747 if ((newrule->pqid =
1748 pf_qname2qid(newrule->pqname)) == 0)
1749 error = EBUSY;
1750 } else
1751 newrule->pqid = newrule->qid;
1752 }
1753#endif /* ALTQ */
1754 if (newrule->tagname[0])
1755 if ((newrule->tag =
1756 pf_tagname2tag(newrule->tagname)) == 0)
1757 error = EBUSY;
1758 if (newrule->match_tagname[0])
1759 if ((newrule->match_tag = pf_tagname2tag(
1760 newrule->match_tagname)) == 0)
1761 error = EBUSY;
1762 if (newrule->rt && !newrule->direction)
1763 error = EINVAL;
1764 if (pf_rtlabel_add(&newrule->src.addr) ||
1765 pf_rtlabel_add(&newrule->dst.addr))
1766 error = EBUSY;
1767 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1768 error = EINVAL;
1769 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1770 error = EINVAL;
1771 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1772 error = EINVAL;
1773 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1774 error = EINVAL;
1775 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1776 error = EINVAL;
1777
1778 if (newrule->overload_tblname[0]) {
1779 if ((newrule->overload_tbl = pfr_attach_table(
1780 ruleset, newrule->overload_tblname)) ==
1781 NULL)
1782 error = EINVAL;
1783 else
1784 newrule->overload_tbl->pfrkt_flags |=
1785 PFR_TFLAG_ACTIVE;
1786 }
1787
1788 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1789 if (((((newrule->action == PF_NAT) ||
1790 (newrule->action == PF_RDR) ||
1791 (newrule->action == PF_BINAT) ||
1792 (newrule->rt > PF_FASTROUTE)) &&
1793 !pcr->anchor[0])) &&
1794 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1795 error = EINVAL;
1796
1797 if (error) {
1798 pf_rm_rule(NULL, newrule);
1799 break;
1800 }
1801 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1802 newrule->evaluations = newrule->packets = 0;
1803 newrule->bytes = 0;
1804 }
1805 pf_empty_pool(&pf_pabuf);
1806
1807 if (pcr->action == PF_CHANGE_ADD_HEAD)
1808 oldrule = TAILQ_FIRST(
1809 ruleset->rules[rs_num].active.ptr);
1810 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1811 oldrule = TAILQ_LAST(
1812 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1813 else {
1814 oldrule = TAILQ_FIRST(
1815 ruleset->rules[rs_num].active.ptr);
1816 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1817 oldrule = TAILQ_NEXT(oldrule, entries);
1818 if (oldrule == NULL) {
1819 if (newrule != NULL)
1820 pf_rm_rule(NULL, newrule);
1821 error = EINVAL;
1822 break;
1823 }
1824 }
1825
1826 if (pcr->action == PF_CHANGE_REMOVE)
1827 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1828 else {
1829 if (oldrule == NULL)
1830 TAILQ_INSERT_TAIL(
1831 ruleset->rules[rs_num].active.ptr,
1832 newrule, entries);
1833 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1834 pcr->action == PF_CHANGE_ADD_BEFORE)
1835 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1836 else
1837 TAILQ_INSERT_AFTER(
1838 ruleset->rules[rs_num].active.ptr,
1839 oldrule, newrule, entries);
1840 }
1841
1842 nr = 0;
1843 TAILQ_FOREACH(oldrule,
1844 ruleset->rules[rs_num].active.ptr, entries)
1845 oldrule->nr = nr++;
1846
1847 ruleset->rules[rs_num].active.ticket++;
1848
1849 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1850 pf_remove_if_empty_ruleset(ruleset);
1851
1852 break;
1853 }
1854
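	/*
	 * DIOCCLRSTATES and DIOCKILLSTATES flush state entries (optionally
	 * filtered by interface, address family, protocol, addresses and
	 * ports) by marking them PFTM_PURGE and running the expiry pass;
	 * the number of states killed is returned to userland in psk_af.
	 */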
1855 case DIOCCLRSTATES: {
1856 struct pf_state *state;
1857 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1858 int killed = 0;
1859
1860 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1861 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1862 state->u.s.kif->pfik_name)) {
1863 state->timeout = PFTM_PURGE;
1864#if NPFSYNC
1865 /* don't send out individual delete messages */
1866 state->sync_flags = PFSTATE_NOSYNC;
1867#endif
1868 killed++;
1869 }
1870 }
1871 pf_purge_expired_states();
1872 pf_status.states = 0;
1873 psk->psk_af = killed;
1874#if NPFSYNC
1875 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1876#endif
1877 break;
1878 }
1879
1880 case DIOCKILLSTATES: {
1881 struct pf_state *state;
1882 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1883 int killed = 0;
1884
1885 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1886 if ((!psk->psk_af || state->af == psk->psk_af)
1887 && (!psk->psk_proto || psk->psk_proto ==
1888 state->proto) &&
1889 PF_MATCHA(psk->psk_src.neg,
1890 &psk->psk_src.addr.v.a.addr,
1891 &psk->psk_src.addr.v.a.mask,
1892 &state->lan.addr, state->af) &&
1893 PF_MATCHA(psk->psk_dst.neg,
1894 &psk->psk_dst.addr.v.a.addr,
1895 &psk->psk_dst.addr.v.a.mask,
1896 &state->ext.addr, state->af) &&
1897 (psk->psk_src.port_op == 0 ||
1898 pf_match_port(psk->psk_src.port_op,
1899 psk->psk_src.port[0], psk->psk_src.port[1],
1900 state->lan.port)) &&
1901 (psk->psk_dst.port_op == 0 ||
1902 pf_match_port(psk->psk_dst.port_op,
1903 psk->psk_dst.port[0], psk->psk_dst.port[1],
1904 state->ext.port)) &&
1905 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1906 state->u.s.kif->pfik_name))) {
1907 state->timeout = PFTM_PURGE;
1908 killed++;
1909 }
1910 }
1911 pf_purge_expired_states();
1912 psk->psk_af = killed;
1913 break;
1914 }
1915
1916 case DIOCADDSTATE: {
1917 struct pfioc_state *ps = (struct pfioc_state *)addr;
1918 struct pf_state *state;
1919 struct pfi_kif *kif;
1920
1921 if (ps->state.timeout >= PFTM_MAX &&
1922 ps->state.timeout != PFTM_UNTIL_PACKET) {
1923 error = EINVAL;
1924 break;
1925 }
1926 state = pool_get(&pf_state_pl, PR_NOWAIT);
1927 if (state == NULL) {
1928 error = ENOMEM;
1929 break;
1930 }
1931 kif = pfi_lookup_create(ps->state.u.ifname);
1932 if (kif == NULL) {
1933 pool_put(&pf_state_pl, state);
1934 error = ENOENT;
1935 break;
1936 }
1937 bcopy(&ps->state, state, sizeof(struct pf_state));
1938 bzero(&state->u, sizeof(state->u));
1939 state->rule.ptr = &pf_default_rule;
1940 state->nat_rule.ptr = NULL;
1941 state->anchor.ptr = NULL;
1942 state->rt_kif = NULL;
1943 state->creation = time_second;
1944 state->pfsync_time = 0;
1945 state->packets[0] = state->packets[1] = 0;
1946 state->bytes[0] = state->bytes[1] = 0;
1947
1948 if (pf_insert_state(kif, state)) {
1949 pfi_maybe_destroy(kif);
1950 pool_put(&pf_state_pl, state);
1951 error = ENOMEM;
1952 }
1953 break;
1954 }
1955
1956 case DIOCGETSTATE: {
1957 struct pfioc_state *ps = (struct pfioc_state *)addr;
1958 struct pf_state *state;
1959 u_int32_t nr;
1960
1961 nr = 0;
1962 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1963 if (nr >= ps->nr)
1964 break;
1965 nr++;
1966 }
1967 if (state == NULL) {
1968 error = EBUSY;
1969 break;
1970 }
1971 bcopy(state, &ps->state, sizeof(struct pf_state));
1972 ps->state.rule.nr = state->rule.ptr->nr;
1973 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1974 -1 : state->nat_rule.ptr->nr;
1975 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1976 -1 : state->anchor.ptr->nr;
1977 ps->state.expire = pf_state_expires(state);
1978 if (ps->state.expire > time_second)
1979 ps->state.expire -= time_second;
1980 else
1981 ps->state.expire = 0;
1982 break;
1983 }
1984
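	/*
	 * DIOCGETSTATES: with ps_len == 0 only the space required for all
	 * states is reported; otherwise as many states as fit are copied
	 * out, with creation and expire converted to seconds relative to
	 * now.
	 */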
1985 case DIOCGETSTATES: {
1986 struct pfioc_states *ps = (struct pfioc_states *)addr;
1987 struct pf_state *state;
1988 struct pf_state *p, pstore;
1989 struct pfi_kif *kif;
1990 u_int32_t nr = 0;
1991 int space = ps->ps_len;
1992
1993 if (space == 0) {
1994 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1995 nr += kif->pfik_states;
1996 ps->ps_len = sizeof(struct pf_state) * nr;
1997 break;
1998 }
1999
2000 p = ps->ps_states;
2001 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
2002 RB_FOREACH(state, pf_state_tree_ext_gwy,
2003 &kif->pfik_ext_gwy) {
2004 int secs = time_second;
2005
2006 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
2007 break;
2008
2009 bcopy(state, &pstore, sizeof(pstore));
2010 strlcpy(pstore.u.ifname, kif->pfik_name,
2011 sizeof(pstore.u.ifname));
2012 pstore.rule.nr = state->rule.ptr->nr;
2013 pstore.nat_rule.nr = (state->nat_rule.ptr ==
2014 NULL) ? -1 : state->nat_rule.ptr->nr;
2015 pstore.anchor.nr = (state->anchor.ptr ==
2016 NULL) ? -1 : state->anchor.ptr->nr;
2017 pstore.creation = secs - pstore.creation;
2018 pstore.expire = pf_state_expires(state);
2019 if (pstore.expire > secs)
2020 pstore.expire -= secs;
2021 else
2022 pstore.expire = 0;
2023#ifdef __FreeBSD__
2024 PF_COPYOUT(&pstore, p, sizeof(*p), error);
2025#else
2026 error = copyout(&pstore, p, sizeof(*p));
2027#endif
2028 if (error)
2029 goto fail;
2030 p++;
2031 nr++;
2032 }
2033 ps->ps_len = sizeof(struct pf_state) * nr;
2034 break;
2035 }
2036
2037 case DIOCGETSTATUS: {
2038 struct pf_status *s = (struct pf_status *)addr;
2039 bcopy(&pf_status, s, sizeof(struct pf_status));
2040 pfi_fill_oldstatus(s);
2041 break;
2042 }
2043
2044 case DIOCSETSTATUSIF: {
2045 struct pfioc_if *pi = (struct pfioc_if *)addr;
2046
2047 if (pi->ifname[0] == 0) {
2048 bzero(pf_status.ifname, IFNAMSIZ);
2049 break;
2050 }
2051 if (ifunit(pi->ifname) == NULL) {
2052 error = EINVAL;
2053 break;
2054 }
2055 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
2056 break;
2057 }
2058
2059 case DIOCCLRSTATUS: {
2060 bzero(pf_status.counters, sizeof(pf_status.counters));
2061 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
2062 bzero(pf_status.scounters, sizeof(pf_status.scounters));
2063 if (*pf_status.ifname)
2064 pfi_clr_istats(pf_status.ifname, NULL,
2065 PFI_FLAG_INSTANCE);
2066 break;
2067 }
2068
2069 case DIOCNATLOOK: {
2070 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
2071 struct pf_state *state;
2072 struct pf_state key;
2073 int m = 0, direction = pnl->direction;
2074
2075 key.af = pnl->af;
2076 key.proto = pnl->proto;
2077
2078 if (!pnl->proto ||
2079 PF_AZERO(&pnl->saddr, pnl->af) ||
2080 PF_AZERO(&pnl->daddr, pnl->af) ||
2081 !pnl->dport || !pnl->sport)
2082 error = EINVAL;
2083 else {
2084			/*
2085			 * userland gives us the source and destination of the
2086			 * connection; reverse the lookup so we ask for what
2087			 * happens with the return traffic, enabling us to find
2088			 * it in the state tree.
2089			 */
2090 if (direction == PF_IN) {
2091 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
2092 key.ext.port = pnl->dport;
2093 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
2094 key.gwy.port = pnl->sport;
2095 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
2096 } else {
2097 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
2098 key.lan.port = pnl->dport;
2099 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
2100 key.ext.port = pnl->sport;
2101 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
2102 }
2103 if (m > 1)
2104 error = E2BIG; /* more than one state */
2105 else if (state != NULL) {
2106 if (direction == PF_IN) {
2107 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
2108 state->af);
2109 pnl->rsport = state->lan.port;
2110 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
2111 pnl->af);
2112 pnl->rdport = pnl->dport;
2113 } else {
2114 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
2115 state->af);
2116 pnl->rdport = state->gwy.port;
2117 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
2118 pnl->af);
2119 pnl->rsport = pnl->sport;
2120 }
2121 } else
2122 error = ENOENT;
2123 }
2124 break;
2125 }
2126
2127 case DIOCSETTIMEOUT: {
2128 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2129 int old;
2130
2131 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2132 pt->seconds < 0) {
2133 error = EINVAL;
2134 goto fail;
2135 }
2136 old = pf_default_rule.timeout[pt->timeout];
2137 pf_default_rule.timeout[pt->timeout] = pt->seconds;
2138 pt->seconds = old;
2139 break;
2140 }
2141
2142 case DIOCGETTIMEOUT: {
2143 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2144
2145 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2146 error = EINVAL;
2147 goto fail;
2148 }
2149 pt->seconds = pf_default_rule.timeout[pt->timeout];
2150 break;
2151 }
2152
2153 case DIOCGETLIMIT: {
2154 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2155
2156 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2157 error = EINVAL;
2158 goto fail;
2159 }
2160 pl->limit = pf_pool_limits[pl->index].limit;
2161 break;
2162 }
2163
2164 case DIOCSETLIMIT: {
2165 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2166 int old_limit;
2167
2168 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2169 pf_pool_limits[pl->index].pp == NULL) {
2170 error = EINVAL;
2171 goto fail;
2172 }
2173#ifdef __FreeBSD__
2174 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit);
2175#else
2176 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2177 pl->limit, NULL, 0) != 0) {
2178 error = EBUSY;
2179 goto fail;
2180 }
2181#endif
2182 old_limit = pf_pool_limits[pl->index].limit;
2183 pf_pool_limits[pl->index].limit = pl->limit;
2184 pl->limit = old_limit;
2185 break;
2186 }
2187
2188 case DIOCSETDEBUG: {
2189 u_int32_t *level = (u_int32_t *)addr;
2190
2191 pf_status.debug = *level;
2192 break;
2193 }
2194
2195 case DIOCCLRRULECTRS: {
2196 struct pf_ruleset *ruleset = &pf_main_ruleset;
2197 struct pf_rule *rule;
2198
2199 TAILQ_FOREACH(rule,
2200 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2201 rule->evaluations = rule->packets =
2202 rule->bytes = 0;
2203 break;
2204 }
2205
2206#ifdef __FreeBSD__
2207 case DIOCGIFSPEED: {
2208 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
2209 struct pf_ifspeed ps;
2210 struct ifnet *ifp;
2211
2212 if (psp->ifname[0] != 0) {
2213 /* Can we completely trust user-land? */
2214 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2215 ifp = ifunit(ps.ifname);
2216 if (ifp != NULL)
2217 psp->baudrate = ifp->if_baudrate;
2218 else
2219 error = EINVAL;
2220 } else
2221 error = EINVAL;
2222 break;
2223 }
2224#endif /* __FreeBSD__ */
2225
2226#ifdef ALTQ
2227 case DIOCSTARTALTQ: {
2228 struct pf_altq *altq;
2229
2230 /* enable all altq interfaces on active list */
2231 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2232 if (altq->qname[0] == 0) {
2233 error = pf_enable_altq(altq);
2234 if (error != 0)
2235 break;
2236 }
2237 }
2238 if (error == 0)
2239 pf_altq_running = 1;
2240 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2241 break;
2242 }
2243
2244 case DIOCSTOPALTQ: {
2245 struct pf_altq *altq;
2246
2247 /* disable all altq interfaces on active list */
2248 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2249 if (altq->qname[0] == 0) {
2250 error = pf_disable_altq(altq);
2251 if (error != 0)
2252 break;
2253 }
2254 }
2255 if (error == 0)
2256 pf_altq_running = 0;
2257 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2258 break;
2259 }
2260
2261 case DIOCADDALTQ: {
2262 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2263 struct pf_altq *altq, *a;
2264
2265 if (pa->ticket != ticket_altqs_inactive) {
2266 error = EBUSY;
2267 break;
2268 }
2269 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2270 if (altq == NULL) {
2271 error = ENOMEM;
2272 break;
2273 }
2274 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2275
2276 /*
2277 * if this is for a queue, find the discipline and
2278 * copy the necessary fields
2279 */
2280 if (altq->qname[0] != 0) {
2281 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2282 error = EBUSY;
2283 pool_put(&pf_altq_pl, altq);
2284 break;
2285 }
2286 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2287 if (strncmp(a->ifname, altq->ifname,
2288 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2289 altq->altq_disc = a->altq_disc;
2290 break;
2291 }
2292 }
2293 }
2294
2295#ifdef __FreeBSD__
2296 PF_UNLOCK();
2297#endif
2298 error = altq_add(altq);
2299#ifdef __FreeBSD__
2300 PF_LOCK();
2301#endif
2302 if (error) {
2303 pool_put(&pf_altq_pl, altq);
2304 break;
2305 }
2306
2307 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2308 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2309 break;
2310 }
2311
2312 case DIOCGETALTQS: {
2313 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2314 struct pf_altq *altq;
2315
2316 pa->nr = 0;
2317 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2318 pa->nr++;
2319 pa->ticket = ticket_altqs_active;
2320 break;
2321 }
2322
2323 case DIOCGETALTQ: {
2324 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2325 struct pf_altq *altq;
2326 u_int32_t nr;
2327
2328 if (pa->ticket != ticket_altqs_active) {
2329 error = EBUSY;
2330 break;
2331 }
2332 nr = 0;
2333 altq = TAILQ_FIRST(pf_altqs_active);
2334 while ((altq != NULL) && (nr < pa->nr)) {
2335 altq = TAILQ_NEXT(altq, entries);
2336 nr++;
2337 }
2338 if (altq == NULL) {
2339 error = EBUSY;
2340 break;
2341 }
2342 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2343 break;
2344 }
2345
2346 case DIOCCHANGEALTQ:
2347 /* CHANGEALTQ not supported yet! */
2348 error = ENODEV;
2349 break;
2350
2351 case DIOCGETQSTATS: {
2352 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2353 struct pf_altq *altq;
2354 u_int32_t nr;
2355 int nbytes;
2356
2357 if (pq->ticket != ticket_altqs_active) {
2358 error = EBUSY;
2359 break;
2360 }
2361 nbytes = pq->nbytes;
2362 nr = 0;
2363 altq = TAILQ_FIRST(pf_altqs_active);
2364 while ((altq != NULL) && (nr < pq->nr)) {
2365 altq = TAILQ_NEXT(altq, entries);
2366 nr++;
2367 }
2368 if (altq == NULL) {
2369 error = EBUSY;
2370 break;
2371 }
2372#ifdef __FreeBSD__
2373 PF_UNLOCK();
2374#endif
2375 error = altq_getqstats(altq, pq->buf, &nbytes);
2376#ifdef __FreeBSD__
2377 PF_LOCK();
2378#endif
2379 if (error == 0) {
2380 pq->scheduler = altq->scheduler;
2381 pq->nbytes = nbytes;
2382 }
2383 break;
2384 }
2385#endif /* ALTQ */
2386
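	/*
	 * Pool address staging: DIOCBEGINADDRS empties the pf_pabuf list
	 * and hands out a fresh ticket, DIOCADDADDR appends validated
	 * entries to it, and a later DIOCADDRULE/DIOCCHANGERULE moves the
	 * buffered addresses into the rule's redirection pool via
	 * pf_mv_pool().
	 */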
2387 case DIOCBEGINADDRS: {
2388 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2389
2390 pf_empty_pool(&pf_pabuf);
2391 pp->ticket = ++ticket_pabuf;
2392 break;
2393 }
2394
2395 case DIOCADDADDR: {
2396 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2397
2398#ifndef INET
2399 if (pp->af == AF_INET) {
2400 error = EAFNOSUPPORT;
2401 break;
2402 }
2403#endif /* INET */
2404#ifndef INET6
2405 if (pp->af == AF_INET6) {
2406 error = EAFNOSUPPORT;
2407 break;
2408 }
2409#endif /* INET6 */
2410 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2411 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2412 pp->addr.addr.type != PF_ADDR_TABLE) {
2413 error = EINVAL;
2414 break;
2415 }
2416 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2417 if (pa == NULL) {
2418 error = ENOMEM;
2419 break;
2420 }
2421 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2422 if (pa->ifname[0]) {
2423 pa->kif = pfi_attach_rule(pa->ifname);
2424 if (pa->kif == NULL) {
2425 pool_put(&pf_pooladdr_pl, pa);
2426 error = EINVAL;
2427 break;
2428 }
2429 }
2430 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2431 pfi_dynaddr_remove(&pa->addr);
2432 pfi_detach_rule(pa->kif);
2433 pool_put(&pf_pooladdr_pl, pa);
2434 error = EINVAL;
2435 break;
2436 }
2437 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2438 break;
2439 }
2440
2441 case DIOCGETADDRS: {
2442 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2443
2444 pp->nr = 0;
2445 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2446 pp->r_num, 0, 1, 0);
2447 if (pool == NULL) {
2448 error = EBUSY;
2449 break;
2450 }
2451 TAILQ_FOREACH(pa, &pool->list, entries)
2452 pp->nr++;
2453 break;
2454 }
2455
2456 case DIOCGETADDR: {
2457 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2458 u_int32_t nr = 0;
2459
2460 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2461 pp->r_num, 0, 1, 1);
2462 if (pool == NULL) {
2463 error = EBUSY;
2464 break;
2465 }
2466 pa = TAILQ_FIRST(&pool->list);
2467 while ((pa != NULL) && (nr < pp->nr)) {
2468 pa = TAILQ_NEXT(pa, entries);
2469 nr++;
2470 }
2471 if (pa == NULL) {
2472 error = EBUSY;
2473 break;
2474 }
2475 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2476 pfi_dynaddr_copyout(&pp->addr.addr);
2477 pf_tbladdr_copyout(&pp->addr.addr);
2478 pf_rtlabel_copyout(&pp->addr.addr);
2479 break;
2480 }
2481
2482 case DIOCCHANGEADDR: {
2483 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2484 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2485 struct pf_ruleset *ruleset;
2486
2487 if (pca->action < PF_CHANGE_ADD_HEAD ||
2488 pca->action > PF_CHANGE_REMOVE) {
2489 error = EINVAL;
2490 break;
2491 }
2492 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2493 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2494 pca->addr.addr.type != PF_ADDR_TABLE) {
2495 error = EINVAL;
2496 break;
2497 }
2498
2499 ruleset = pf_find_ruleset(pca->anchor);
2500 if (ruleset == NULL) {
2501 error = EBUSY;
2502 break;
2503 }
2504 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2505 pca->r_num, pca->r_last, 1, 1);
2506 if (pool == NULL) {
2507 error = EBUSY;
2508 break;
2509 }
2510 if (pca->action != PF_CHANGE_REMOVE) {
2511 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2512 if (newpa == NULL) {
2513 error = ENOMEM;
2514 break;
2515 }
2516 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2517#ifndef INET
2518 if (pca->af == AF_INET) {
2519 pool_put(&pf_pooladdr_pl, newpa);
2520 error = EAFNOSUPPORT;
2521 break;
2522 }
2523#endif /* INET */
2524#ifndef INET6
2525 if (pca->af == AF_INET6) {
2526 pool_put(&pf_pooladdr_pl, newpa);
2527 error = EAFNOSUPPORT;
2528 break;
2529 }
2530#endif /* INET6 */
2531 if (newpa->ifname[0]) {
2532 newpa->kif = pfi_attach_rule(newpa->ifname);
2533 if (newpa->kif == NULL) {
2534 pool_put(&pf_pooladdr_pl, newpa);
2535 error = EINVAL;
2536 break;
2537 }
2538 } else
2539 newpa->kif = NULL;
2540 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2541 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2542 pfi_dynaddr_remove(&newpa->addr);
2543 pfi_detach_rule(newpa->kif);
2544 pool_put(&pf_pooladdr_pl, newpa);
2545 error = EINVAL;
2546 break;
2547 }
2548 }
2549
2550 if (pca->action == PF_CHANGE_ADD_HEAD)
2551 oldpa = TAILQ_FIRST(&pool->list);
2552 else if (pca->action == PF_CHANGE_ADD_TAIL)
2553 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2554 else {
2555 int i = 0;
2556
2557 oldpa = TAILQ_FIRST(&pool->list);
2558 while ((oldpa != NULL) && (i < pca->nr)) {
2559 oldpa = TAILQ_NEXT(oldpa, entries);
2560 i++;
2561 }
2562 if (oldpa == NULL) {
2563 error = EINVAL;
2564 break;
2565 }
2566 }
2567
2568 if (pca->action == PF_CHANGE_REMOVE) {
2569 TAILQ_REMOVE(&pool->list, oldpa, entries);
2570 pfi_dynaddr_remove(&oldpa->addr);
2571 pf_tbladdr_remove(&oldpa->addr);
2572 pfi_detach_rule(oldpa->kif);
2573 pool_put(&pf_pooladdr_pl, oldpa);
2574 } else {
2575 if (oldpa == NULL)
2576 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2577 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2578 pca->action == PF_CHANGE_ADD_BEFORE)
2579 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2580 else
2581 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2582 newpa, entries);
2583 }
2584
2585 pool->cur = TAILQ_FIRST(&pool->list);
2586 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2587 pca->af);
2588 break;
2589 }
2590
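	/*
	 * DIOCGETRULESETS/DIOCGETRULESET enumerate the child anchors of a
	 * given path: the former returns their count, the latter the name
	 * of the child at index pr->nr.
	 */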
2591 case DIOCGETRULESETS: {
2592 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2593 struct pf_ruleset *ruleset;
2594 struct pf_anchor *anchor;
2595
2596 pr->path[sizeof(pr->path) - 1] = 0;
2597 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2598 error = EINVAL;
2599 break;
2600 }
2601 pr->nr = 0;
2602 if (ruleset->anchor == NULL) {
2603 /* XXX kludge for pf_main_ruleset */
2604 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2605 if (anchor->parent == NULL)
2606 pr->nr++;
2607 } else {
2608 RB_FOREACH(anchor, pf_anchor_node,
2609 &ruleset->anchor->children)
2610 pr->nr++;
2611 }
2612 break;
2613 }
2614
2615 case DIOCGETRULESET: {
2616 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2617 struct pf_ruleset *ruleset;
2618 struct pf_anchor *anchor;
2619 u_int32_t nr = 0;
2620
2621 pr->path[sizeof(pr->path) - 1] = 0;
2622 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2623 error = EINVAL;
2624 break;
2625 }
2626 pr->name[0] = 0;
2627 if (ruleset->anchor == NULL) {
2628 /* XXX kludge for pf_main_ruleset */
2629 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2630 if (anchor->parent == NULL && nr++ == pr->nr) {
2631 strlcpy(pr->name, anchor->name,
2632 sizeof(pr->name));
2633 break;
2634 }
2635 } else {
2636 RB_FOREACH(anchor, pf_anchor_node,
2637 &ruleset->anchor->children)
2638 if (nr++ == pr->nr) {
2639 strlcpy(pr->name, anchor->name,
2640 sizeof(pr->name));
2641 break;
2642 }
2643 }
2644 if (!pr->name[0])
2645 error = EBUSY;
2646 break;
2647 }
2648
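	/*
	 * The DIOCR* table ioctls below check pfrio_esize against the
	 * expected element size and then hand off to the corresponding
	 * pfr_*() routine with PFR_FLAG_USERIOCTL set.
	 */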
2649 case DIOCRCLRTABLES: {
2650 struct pfioc_table *io = (struct pfioc_table *)addr;
2651
2652 if (io->pfrio_esize != 0) {
2653 error = ENODEV;
2654 break;
2655 }
2656 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2657 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2658 break;
2659 }
2660
2661 case DIOCRADDTABLES: {
2662 struct pfioc_table *io = (struct pfioc_table *)addr;
2663
2664 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2665 error = ENODEV;
2666 break;
2667 }
2668 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2669 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2670 break;
2671 }
2672
2673 case DIOCRDELTABLES: {
2674 struct pfioc_table *io = (struct pfioc_table *)addr;
2675
2676 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2677 error = ENODEV;
2678 break;
2679 }
2680 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2681 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2682 break;
2683 }
2684
2685 case DIOCRGETTABLES: {
2686 struct pfioc_table *io = (struct pfioc_table *)addr;
2687
2688 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2689 error = ENODEV;
2690 break;
2691 }
2692 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2693 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2694 break;
2695 }
2696
2697 case DIOCRGETTSTATS: {
2698 struct pfioc_table *io = (struct pfioc_table *)addr;
2699
2700 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2701 error = ENODEV;
2702 break;
2703 }
2704 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2705 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2706 break;
2707 }
2708
2709 case DIOCRCLRTSTATS: {
2710 struct pfioc_table *io = (struct pfioc_table *)addr;
2711
2712 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2713 error = ENODEV;
2714 break;
2715 }
2716 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2717 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2718 break;
2719 }
2720
2721 case DIOCRSETTFLAGS: {
2722 struct pfioc_table *io = (struct pfioc_table *)addr;
2723
2724 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2725 error = ENODEV;
2726 break;
2727 }
2728 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2729 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2730 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2731 break;
2732 }
2733
2734 case DIOCRCLRADDRS: {
2735 struct pfioc_table *io = (struct pfioc_table *)addr;
2736
2737 if (io->pfrio_esize != 0) {
2738 error = ENODEV;
2739 break;
2740 }
2741 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2742 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2743 break;
2744 }
2745
2746 case DIOCRADDADDRS: {
2747 struct pfioc_table *io = (struct pfioc_table *)addr;
2748
2749 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2750 error = ENODEV;
2751 break;
2752 }
2753 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2754 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2755 PFR_FLAG_USERIOCTL);
2756 break;
2757 }
2758
2759 case DIOCRDELADDRS: {
2760 struct pfioc_table *io = (struct pfioc_table *)addr;
2761
2762 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2763 error = ENODEV;
2764 break;
2765 }
2766 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2767 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2768 PFR_FLAG_USERIOCTL);
2769 break;
2770 }
2771
2772 case DIOCRSETADDRS: {
2773 struct pfioc_table *io = (struct pfioc_table *)addr;
2774
2775 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2776 error = ENODEV;
2777 break;
2778 }
2779 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2780 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2781 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2782 PFR_FLAG_USERIOCTL);
2783 break;
2784 }
2785
2786 case DIOCRGETADDRS: {
2787 struct pfioc_table *io = (struct pfioc_table *)addr;
2788
2789 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2790 error = ENODEV;
2791 break;
2792 }
2793 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2794 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2795 break;
2796 }
2797
2798 case DIOCRGETASTATS: {
2799 struct pfioc_table *io = (struct pfioc_table *)addr;
2800
2801 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2802 error = ENODEV;
2803 break;
2804 }
2805 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2806 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2807 break;
2808 }
2809
2810 case DIOCRCLRASTATS: {
2811 struct pfioc_table *io = (struct pfioc_table *)addr;
2812
2813 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2814 error = ENODEV;
2815 break;
2816 }
2817 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2818 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2819 PFR_FLAG_USERIOCTL);
2820 break;
2821 }
2822
2823 case DIOCRTSTADDRS: {
2824 struct pfioc_table *io = (struct pfioc_table *)addr;
2825
2826 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2827 error = ENODEV;
2828 break;
2829 }
2830 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2831 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2832 PFR_FLAG_USERIOCTL);
2833 break;
2834 }
2835
2836 case DIOCRINADEFINE: {
2837 struct pfioc_table *io = (struct pfioc_table *)addr;
2838
2839 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2840 error = ENODEV;
2841 break;
2842 }
2843 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2844 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2845 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2846 break;
2847 }
2848
2849 case DIOCOSFPADD: {
2850 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2851 error = pf_osfp_add(io);
2852 break;
2853 }
2854
2855 case DIOCOSFPGET: {
2856 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2857 error = pf_osfp_get(io);
2858 break;
2859 }
2860
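	/*
	 * DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT implement transactions over
	 * an array of pfioc_trans_e elements: begin opens the inactive
	 * ruleset/table/altq of each element and returns a ticket; commit
	 * first validates every ticket and only then commits each element.
	 * Illustrative userland sketch (not part of this file):
	 *
	 *	struct pfioc_trans_e e[1];
	 *	struct pfioc_trans io;
	 *
	 *	memset(&io, 0, sizeof(io));
	 *	memset(e, 0, sizeof(e));
	 *	e[0].rs_num = PF_RULESET_FILTER;
	 *	io.size = 1;
	 *	io.esize = sizeof(e[0]);
	 *	io.array = e;
	 *	if (ioctl(dev, DIOCXBEGIN, &io) == -1)
	 *		err(1, "DIOCXBEGIN");
	 *	... load rules into the inactive set using e[0].ticket ...
	 *	if (ioctl(dev, DIOCXCOMMIT, &io) == -1)
	 *		err(1, "DIOCXCOMMIT");
	 */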
2861 case DIOCXBEGIN: {
2862 struct pfioc_trans *io = (struct pfioc_trans *)
2863 addr;
2864 static struct pfioc_trans_e ioe;
2865 static struct pfr_table table;
2866 int i;
2867
2868 if (io->esize != sizeof(ioe)) {
2869 error = ENODEV;
2870 goto fail;
2871 }
2872 for (i = 0; i < io->size; i++) {
2873#ifdef __FreeBSD__
2874 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2875 if (error) {
2876#else
2877 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2878#endif
2879 error = EFAULT;
2880 goto fail;
2881 }
2882 switch (ioe.rs_num) {
2883#ifdef ALTQ
2884 case PF_RULESET_ALTQ:
2885 if (ioe.anchor[0]) {
2886 error = EINVAL;
2887 goto fail;
2888 }
2889 if ((error = pf_begin_altq(&ioe.ticket)))
2890 goto fail;
2891 break;
2892#endif /* ALTQ */
2893 case PF_RULESET_TABLE:
2894 bzero(&table, sizeof(table));
2895 strlcpy(table.pfrt_anchor, ioe.anchor,
2896 sizeof(table.pfrt_anchor));
2897 if ((error = pfr_ina_begin(&table,
2898 &ioe.ticket, NULL, 0)))
2899 goto fail;
2900 break;
2901 default:
2902 if ((error = pf_begin_rules(&ioe.ticket,
2903 ioe.rs_num, ioe.anchor)))
2904 goto fail;
2905 break;
2906 }
2907#ifdef __FreeBSD__
2908 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]),
2909 error);
2910 if (error) {
2911#else
2912 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2913#endif
2914 error = EFAULT;
2915 goto fail;
2916 }
2917 }
2918 break;
2919 }
2920
2921 case DIOCXROLLBACK: {
2922 struct pfioc_trans *io = (struct pfioc_trans *)
2923 addr;
2924 static struct pfioc_trans_e ioe;
2925 static struct pfr_table table;
2926 int i;
2927
2928 if (io->esize != sizeof(ioe)) {
2929 error = ENODEV;
2930 goto fail;
2931 }
2932 for (i = 0; i < io->size; i++) {
2933#ifdef __FreeBSD__
2934 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2935 if (error) {
2936#else
2937 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2938#endif
2939 error = EFAULT;
2940 goto fail;
2941 }
2942 switch (ioe.rs_num) {
2943#ifdef ALTQ
2944 case PF_RULESET_ALTQ:
2945 if (ioe.anchor[0]) {
2946 error = EINVAL;
2947 goto fail;
2948 }
2949 if ((error = pf_rollback_altq(ioe.ticket)))
2950 goto fail; /* really bad */
2951 break;
2952#endif /* ALTQ */
2953 case PF_RULESET_TABLE:
2954 bzero(&table, sizeof(table));
2955 strlcpy(table.pfrt_anchor, ioe.anchor,
2956 sizeof(table.pfrt_anchor));
2957 if ((error = pfr_ina_rollback(&table,
2958 ioe.ticket, NULL, 0)))
2959 goto fail; /* really bad */
2960 break;
2961 default:
2962 if ((error = pf_rollback_rules(ioe.ticket,
2963 ioe.rs_num, ioe.anchor)))
2964 goto fail; /* really bad */
2965 break;
2966 }
2967 }
2968 break;
2969 }
2970
2971 case DIOCXCOMMIT: {
2972 struct pfioc_trans *io = (struct pfioc_trans *)
2973 addr;
2974 static struct pfioc_trans_e ioe;
2975 static struct pfr_table table;
2976 struct pf_ruleset *rs;
2977 int i;
2978
2979 if (io->esize != sizeof(ioe)) {
2980 error = ENODEV;
2981 goto fail;
2982 }
2983		/* first make sure everything will succeed */
2984 for (i = 0; i < io->size; i++) {
2985#ifdef __FreeBSD__
2986 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2987 if (error) {
2988#else
2989 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2990#endif
2991 error = EFAULT;
2992 goto fail;
2993 }
2994 switch (ioe.rs_num) {
2995#ifdef ALTQ
2996 case PF_RULESET_ALTQ:
2997 if (ioe.anchor[0]) {
2998 error = EINVAL;
2999 goto fail;
3000 }
3001 if (!altqs_inactive_open || ioe.ticket !=
3002 ticket_altqs_inactive) {
3003 error = EBUSY;
3004 goto fail;
3005 }
3006 break;
3007#endif /* ALTQ */
3008 case PF_RULESET_TABLE:
3009 rs = pf_find_ruleset(ioe.anchor);
3010 if (rs == NULL || !rs->topen || ioe.ticket !=
3011 rs->tticket) {
3012 error = EBUSY;
3013 goto fail;
3014 }
3015 break;
3016 default:
3017 if (ioe.rs_num < 0 || ioe.rs_num >=
3018 PF_RULESET_MAX) {
3019 error = EINVAL;
3020 goto fail;
3021 }
3022 rs = pf_find_ruleset(ioe.anchor);
3023 if (rs == NULL ||
3024 !rs->rules[ioe.rs_num].inactive.open ||
3025 rs->rules[ioe.rs_num].inactive.ticket !=
3026 ioe.ticket) {
3027 error = EBUSY;
3028 goto fail;
3029 }
3030 break;
3031 }
3032 }
3033 /* now do the commit - no errors should happen here */
3034 for (i = 0; i < io->size; i++) {
3035#ifdef __FreeBSD__
3036 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
3037 if (error) {
3038#else
3039 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
3040#endif
3041 error = EFAULT;
3042 goto fail;
3043 }
3044 switch (ioe.rs_num) {
3045#ifdef ALTQ
3046 case PF_RULESET_ALTQ:
3047 if ((error = pf_commit_altq(ioe.ticket)))
3048 goto fail; /* really bad */
3049 break;
3050#endif /* ALTQ */
3051 case PF_RULESET_TABLE:
3052 bzero(&table, sizeof(table));
3053 strlcpy(table.pfrt_anchor, ioe.anchor,
3054 sizeof(table.pfrt_anchor));
3055 if ((error = pfr_ina_commit(&table, ioe.ticket,
3056 NULL, NULL, 0)))
3057 goto fail; /* really bad */
3058 break;
3059 default:
3060 if ((error = pf_commit_rules(ioe.ticket,
3061 ioe.rs_num, ioe.anchor)))
3062 goto fail; /* really bad */
3063 break;
3064 }
3065 }
3066 break;
3067 }
3068
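	/*
	 * DIOCGETSRCNODES copies out the source tracking nodes, converting
	 * creation/expire to seconds relative to now and scaling the
	 * connection rate counter down by the time elapsed since it was
	 * last updated.
	 */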
3069 case DIOCGETSRCNODES: {
3070 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3071 struct pf_src_node *n;
3072 struct pf_src_node *p, pstore;
3073 u_int32_t nr = 0;
3074 int space = psn->psn_len;
3075
3076 if (space == 0) {
3077 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
3078 nr++;
3079 psn->psn_len = sizeof(struct pf_src_node) * nr;
3080 break;
3081 }
3082
3083 p = psn->psn_src_nodes;
3084 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3085 int secs = time_second, diff;
3086
3087 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3088 break;
3089
3090 bcopy(n, &pstore, sizeof(pstore));
3091 if (n->rule.ptr != NULL)
3092 pstore.rule.nr = n->rule.ptr->nr;
3093 pstore.creation = secs - pstore.creation;
3094 if (pstore.expire > secs)
3095 pstore.expire -= secs;
3096 else
3097 pstore.expire = 0;
3098
3099 /* adjust the connection rate estimate */
3100 diff = secs - n->conn_rate.last;
3101 if (diff >= n->conn_rate.seconds)
3102 pstore.conn_rate.count = 0;
3103 else
3104 pstore.conn_rate.count -=
3105 n->conn_rate.count * diff /
3106 n->conn_rate.seconds;
3107
3108#ifdef __FreeBSD__
3109 PF_COPYOUT(&pstore, p, sizeof(*p), error);
3110#else
3111 error = copyout(&pstore, p, sizeof(*p));
3112#endif
3113 if (error)
3114 goto fail;
3115 p++;
3116 nr++;
3117 }
3118 psn->psn_len = sizeof(struct pf_src_node) * nr;
3119 break;
3120 }
3121
3122 case DIOCCLRSRCNODES: {
3123 struct pf_src_node *n;
3124 struct pf_state *state;
3125
3126 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3127 state->src_node = NULL;
3128 state->nat_src_node = NULL;
3129 }
3130 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3131 n->expire = 1;
3132 n->states = 0;
3133 }
3134 pf_purge_expired_src_nodes();
3135 pf_status.src_nodes = 0;
3136 break;
3137 }
3138
3139 case DIOCSETHOSTID: {
3140 u_int32_t *hostid = (u_int32_t *)addr;
3141
3142 if (*hostid == 0)
3143 pf_status.hostid = arc4random();
3144 else
3145 pf_status.hostid = *hostid;
3146 break;
3147 }
3148
3149 case DIOCOSFPFLUSH:
3150 pf_osfp_flush();
3151 break;
3152
3153 case DIOCIGETIFACES: {
3154 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3155
3156 if (io->pfiio_esize != sizeof(struct pfi_if)) {
3157 error = ENODEV;
3158 break;
3159 }
3160 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
3161 &io->pfiio_size, io->pfiio_flags);
3162 break;
3163 }
3164
3165 case DIOCICLRISTATS: {
3166 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3167
3168 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
3169 io->pfiio_flags);
3170 break;
3171 }
3172
3173 case DIOCSETIFFLAG: {
3174 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3175
3176 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3177 break;
3178 }
3179
3180 case DIOCCLRIFFLAG: {
3181 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3182
3183 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3184 break;
3185 }
3186
3187 default:
3188 error = ENODEV;
3189 break;
3190 }
3191fail:
3192#ifdef __FreeBSD__
3193 PF_UNLOCK();
3194#else
3195 splx(s);
3196#endif
3197 return (error);
3198}
3199
3200#ifdef __FreeBSD__
3201/*
3202 * XXX - Check for version mismatch!!!
3203 */
3204static void
3205pf_clear_states(void)
3206{
3207 struct pf_state *state;
3208
3209 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3210 state->timeout = PFTM_PURGE;
3211#if NPFSYNC
3212 /* don't send out individual delete messages */
3213 state->sync_flags = PFSTATE_NOSYNC;
3214#endif
3215 }
3216 pf_purge_expired_states();
3217 pf_status.states = 0;
3218#if 0 /* NPFSYNC */
3219/*
3220 * XXX This is called on module unload; we do not want to sync that over?
3221 */
3222 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3223#endif
3224}
3225
3226static int
3227pf_clear_tables(void)
3228{
3229 struct pfioc_table io;
3230 int error;
3231
3232 bzero(&io, sizeof(io));
3233
3234 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3235 io.pfrio_flags);
3236
3237 return (error);
3238}
3239
3240static void
3241pf_clear_srcnodes(void)
3242{
3243 struct pf_src_node *n;
3244 struct pf_state *state;
3245
3246 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3247 state->src_node = NULL;
3248 state->nat_src_node = NULL;
3249 }
3250 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3251 n->expire = 1;
3252 n->states = 0;
3253 }
3254 pf_purge_expired_src_nodes();
3255 pf_status.src_nodes = 0;
3256}
3257/*
3258 * XXX - Check for version mismatch!!!
3259 */
3260
3261/*
3262 * Duplicate pfctl -Fa operation to get rid of as much as we can.
3263 */
3264static int
3265shutdown_pf(void)
3266{
3267 int error = 0;
3268 u_int32_t t[5];
3269 char nn = '\0';
3270
3271 callout_stop(&pf_expire_to);
3272
3273 pf_status.running = 0;
3274 do {
3275 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3276 != 0) {
3277 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3278 break;
3279 }
3280 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3281 != 0) {
3282 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3283 break; /* XXX: rollback? */
3284 }
3285 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3286 != 0) {
3287 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3288 break; /* XXX: rollback? */
3289 }
3290 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3291 != 0) {
3292 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3293 break; /* XXX: rollback? */
3294 }
3295 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3296 != 0) {
3297 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3298 break; /* XXX: rollback? */
3299 }
3300
3301 /* XXX: these should always succeed here */
3302 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3303 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3304 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3305 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3306 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3307
3308 if ((error = pf_clear_tables()) != 0)
3309 break;
3310
3311#ifdef ALTQ
3312 if ((error = pf_begin_altq(&t[0])) != 0) {
3313 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3314 break;
3315 }
3316 pf_commit_altq(t[0]);
3317#endif
3318
3319 pf_clear_states();
3320
3321 pf_clear_srcnodes();
3322
3323		/* status does not use malloc'ed memory, so no cleanup is needed */
3324		/* fingerprints and interfaces have their own cleanup code */
3325 } while(0);
3326
3327 return (error);
3328}
3329
3330static int
3331pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3332 struct inpcb *inp)
3333{
3334	/*
3335	 * XXX Wed Jul 9 22:03:16 2003 UTC
3336	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
3337	 * in the network stack.  It used to convert ip_len/ip_off to host
3338	 * byte order first, as FreeBSD still does.  This is no longer the
3339	 * case, so convert them back to network byte order before handing
3340	 * the packet to pf_test() and restore them afterwards.
3341	 */
3342 struct ip *h = NULL;
3343 int chk;
3344
3345 if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
3346		/* if m_pkthdr.len is shorter than the IP header, let pf handle it */
3347 h = mtod(*m, struct ip *);
3348 HTONS(h->ip_len);
3349 HTONS(h->ip_off);
3350 }
3351 chk = pf_test(PF_IN, ifp, m, NULL, inp);
3352 if (chk && *m) {
3353 m_freem(*m);
3354 *m = NULL;
3355 }
3356 if (*m != NULL) {
3357 /* pf_test can change ip header location */
3358 h = mtod(*m, struct ip *);
3359 NTOHS(h->ip_len);
3360 NTOHS(h->ip_off);
3361 }
3362 return chk;
3363}
3364
3365static int
3366pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3367 struct inpcb *inp)
3368{
3369	/*
3370	 * XXX Wed Jul 9 22:03:16 2003 UTC
3371	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
3372	 * in the network stack.  It used to convert ip_len/ip_off to host
3373	 * byte order first, as FreeBSD still does.  This is no longer the
3374	 * case, so convert them back to network byte order before handing
3375	 * the packet to pf_test() and restore them afterwards.
3376	 */
3377 struct ip *h = NULL;
3378 int chk;
3379
3380	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3381 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3382 in_delayed_cksum(*m);
3383 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3384 }
3385 if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
3386		/* if m_pkthdr.len is shorter than the IP header, let pf handle it */
3387 h = mtod(*m, struct ip *);
3388 HTONS(h->ip_len);
3389 HTONS(h->ip_off);
3390 }
3391 chk = pf_test(PF_OUT, ifp, m, NULL, inp);
3392 if (chk && *m) {
3393 m_freem(*m);
3394 *m = NULL;
3395 }
3396 if (*m != NULL) {
3397 /* pf_test can change ip header location */
3398 h = mtod(*m, struct ip *);
3399 NTOHS(h->ip_len);
3400 NTOHS(h->ip_off);
3401 }
3402 return chk;
3403}
3404
3405#ifdef INET6
3406static int
3407pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3408 struct inpcb *inp)
3409{
3410 /*
3411	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
3412 */
3413 int chk;
3414
3415 chk = pf_test6(PF_IN, ifp, m, NULL, inp);
3416 if (chk && *m) {
3417 m_freem(*m);
3418 *m = NULL;
3419 }
3420 return chk;
3421}
3422
3423static int
3424pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3425 struct inpcb *inp)
3426{
3427 /*
3428	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
3429 */
3430 int chk;
3431
3432	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3433 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3434 in_delayed_cksum(*m);
3435 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3436 }
3437 chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
3438 if (chk && *m) {
3439 m_freem(*m);
3440 *m = NULL;
3441 }
3442 return chk;
3443}
3444#endif /* INET6 */
3445
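/*
 * hook_pf()/dehook_pf() attach and detach the pf_check*() wrappers above
 * to the pfil(9) heads for AF_INET (and AF_INET6 when available);
 * pf_pfil_hooked records the current state so both operations are
 * idempotent.
 */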
3446static int
3447hook_pf(void)
3448{
3449 struct pfil_head *pfh_inet;
3450#ifdef INET6
3451 struct pfil_head *pfh_inet6;
3452#endif
3453
3454 PF_ASSERT(MA_NOTOWNED);
3455
3456 if (pf_pfil_hooked)
3457 return (0);
3458
3459 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3460 if (pfh_inet == NULL)
3461 return (ESRCH); /* XXX */
3462 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3463 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3464#ifdef INET6
3465 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3466 if (pfh_inet6 == NULL) {
3467 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3468 pfh_inet);
3469 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3470 pfh_inet);
3471 return (ESRCH); /* XXX */
3472 }
3473 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3474 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3475#endif
3476
3477 pf_pfil_hooked = 1;
3478 return (0);
3479}
3480
3481static int
3482dehook_pf(void)
3483{
3484 struct pfil_head *pfh_inet;
3485#ifdef INET6
3486 struct pfil_head *pfh_inet6;
3487#endif
3488
3489 PF_ASSERT(MA_NOTOWNED);
3490
3491 if (pf_pfil_hooked == 0)
3492 return (0);
3493
3494 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3495 if (pfh_inet == NULL)
3496 return (ESRCH); /* XXX */
3497 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3498 pfh_inet);
3499 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3500 pfh_inet);
3501#ifdef INET6
3502 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3503 if (pfh_inet6 == NULL)
3504 return (ESRCH); /* XXX */
3505 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3506 pfh_inet6);
3507 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3508 pfh_inet6);
3509#endif
3510
3511 pf_pfil_hooked = 0;
3512 return (0);
3513}
3514
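/*
 * Module plumbing: pf_load() creates /dev/pf and calls pfattach();
 * pf_unload() detaches from pfil(9) first and only then runs
 * shutdown_pf() and releases the remaining resources.
 */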
3515static int
3516pf_load(void)
3517{
3518 init_zone_var();
3519 init_pf_mutex();
3520 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3521 if (pfattach() < 0) {
3522 destroy_dev(pf_dev);
3523 destroy_pf_mutex();
3524 return (ENOMEM);
3525 }
3526 return (0);
3527}
3528
3529static int
3530pf_unload(void)
3531{
3532 int error = 0;
3533
3534 PF_LOCK();
3535 pf_status.running = 0;
3536 PF_UNLOCK();
3537 error = dehook_pf();
3538 if (error) {
3539 /*
3540 * Should not happen!
3541 * XXX Due to error code ESRCH, kldunload will show
3542 * a message like 'No such process'.
3543 */
3544		printf("%s: pfil unregistration failed\n", __FUNCTION__);
3545 return error;
3546 }
3547 PF_LOCK();
3548 shutdown_pf();
3549 pfi_cleanup();
3550 pf_osfp_flush();
3551 pf_osfp_cleanup();
3552 cleanup_pf_zone();
3553 PF_UNLOCK();
3554 destroy_dev(pf_dev);
3555 destroy_pf_mutex();
3556 return error;
3557}
3558
3559static int
3560pf_modevent(module_t mod, int type, void *data)
3561{
3562 int error = 0;
3563
3564 switch(type) {
3565 case MOD_LOAD:
3566 error = pf_load();
3567 break;
3568
3569 case MOD_UNLOAD:
3570 error = pf_unload();
3571 break;
3572 default:
3573 error = EINVAL;
3574 break;
3575 }
3576 return error;
3577}
3578
3579static moduledata_t pf_mod = {
3580 "pf",
3581 pf_modevent,
3582 0
3583};
3584
3585DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
3586MODULE_VERSION(pf, PF_MODVER);
3587#endif /* __FreeBSD__ */
339 callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz,
340 pf_purge_timeout, &pf_expire_to);
341
342 pf_normalize_init();
343 bzero(&pf_status, sizeof(pf_status));
344 pf_pfil_hooked = 0;
345
346 /* XXX do our best to avoid a conflict */
347 pf_status.hostid = arc4random();
348
349 return (error);
350}
351#else /* !__FreeBSD__ */
352void
353pfattach(int num)
354{
355 u_int32_t *timeout = pf_default_rule.timeout;
356
357 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
358 &pool_allocator_nointr);
359 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
360 "pfsrctrpl", NULL);
361 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
362 NULL);
363 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
364 &pool_allocator_nointr);
365 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
366 "pfpooladdrpl", &pool_allocator_nointr);
367 pfr_initialize();
368 pfi_initialize();
369 pf_osfp_initialize();
370
371 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
372 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
373
374 RB_INIT(&tree_src_tracking);
375 RB_INIT(&pf_anchors);
376 pf_init_ruleset(&pf_main_ruleset);
377 TAILQ_INIT(&pf_altqs[0]);
378 TAILQ_INIT(&pf_altqs[1]);
379 TAILQ_INIT(&pf_pabuf);
380 pf_altqs_active = &pf_altqs[0];
381 pf_altqs_inactive = &pf_altqs[1];
382 TAILQ_INIT(&state_updates);
383
384 /* default rule should never be garbage collected */
385 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
386 pf_default_rule.action = PF_PASS;
387 pf_default_rule.nr = -1;
388
389 /* initialize default timeouts */
390 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
391 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
392 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
393 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
394 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
395 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
396 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
397 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
398 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
399 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
400 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
401 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
402 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
403 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
404 timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
405 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
406 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
407 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
408
409 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
410 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
411
412 pf_normalize_init();
413 bzero(&pf_status, sizeof(pf_status));
414 pf_status.debug = PF_DEBUG_URGENT;
415
416 /* XXX do our best to avoid a conflict */
417 pf_status.hostid = arc4random();
418}
419
420int
421pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
422{
423 if (minor(dev) >= 1)
424 return (ENXIO);
425 return (0);
426}
427
428int
429pfclose(struct cdev *dev, int flags, int fmt, struct proc *p)
430{
431 if (minor(dev) >= 1)
432 return (ENXIO);
433 return (0);
434}
435#endif /* __FreeBSD__ */
436
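/*
 * pf_get_pool() resolves the address pool of a single rule, identified
 * by anchor path, rule action (which selects the ruleset), rule number
 * and ticket.  r_last picks the last rule in the queue instead of
 * matching by number, active selects the active vs. inactive queue, and
 * check_ticket enforces that the caller's ticket is current.
 */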
437struct pf_pool *
438pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
439 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
440 u_int8_t check_ticket)
441{
442 struct pf_ruleset *ruleset;
443 struct pf_rule *rule;
444 int rs_num;
445
446 ruleset = pf_find_ruleset(anchor);
447 if (ruleset == NULL)
448 return (NULL);
449 rs_num = pf_get_ruleset_number(rule_action);
450 if (rs_num >= PF_RULESET_MAX)
451 return (NULL);
452 if (active) {
453 if (check_ticket && ticket !=
454 ruleset->rules[rs_num].active.ticket)
455 return (NULL);
456 if (r_last)
457 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
458 pf_rulequeue);
459 else
460 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
461 } else {
462 if (check_ticket && ticket !=
463 ruleset->rules[rs_num].inactive.ticket)
464 return (NULL);
465 if (r_last)
466 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
467 pf_rulequeue);
468 else
469 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
470 }
471 if (!r_last) {
472 while ((rule != NULL) && (rule->nr != rule_number))
473 rule = TAILQ_NEXT(rule, entries);
474 }
475 if (rule == NULL)
476 return (NULL);
477
478 return (&rule->rpool);
479}
480
481int
482pf_get_ruleset_number(u_int8_t action)
483{
484 switch (action) {
485 case PF_SCRUB:
486 case PF_NOSCRUB:
487 return (PF_RULESET_SCRUB);
488 break;
489 case PF_PASS:
490 case PF_DROP:
491 return (PF_RULESET_FILTER);
492 break;
493 case PF_NAT:
494 case PF_NONAT:
495 return (PF_RULESET_NAT);
496 break;
497 case PF_BINAT:
498 case PF_NOBINAT:
499 return (PF_RULESET_BINAT);
500 break;
501 case PF_RDR:
502 case PF_NORDR:
503 return (PF_RULESET_RDR);
504 break;
505 default:
506 return (PF_RULESET_MAX);
507 break;
508 }
509}
510
511void
512pf_init_ruleset(struct pf_ruleset *ruleset)
513{
514 int i;
515
516 memset(ruleset, 0, sizeof(struct pf_ruleset));
517 for (i = 0; i < PF_RULESET_MAX; i++) {
518 TAILQ_INIT(&ruleset->rules[i].queues[0]);
519 TAILQ_INIT(&ruleset->rules[i].queues[1]);
520 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
521 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
522 }
523}
524
525struct pf_anchor *
526pf_find_anchor(const char *path)
527{
528 static struct pf_anchor key;
529
530 memset(&key, 0, sizeof(key));
531 strlcpy(key.path, path, sizeof(key.path));
532 return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
533}
534
535struct pf_ruleset *
536pf_find_ruleset(const char *path)
537{
538 struct pf_anchor *anchor;
539
540 while (*path == '/')
541 path++;
542 if (!*path)
543 return (&pf_main_ruleset);
544 anchor = pf_find_anchor(path);
545 if (anchor == NULL)
546 return (NULL);
547 else
548 return (&anchor->ruleset);
549}
550
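/*
 * pf_find_or_create_ruleset() walks the anchor path and creates any
 * missing anchors along the way.  Note that it keeps its scratch path
 * in a static buffer, so it is not re-entrant.
 */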
551struct pf_ruleset *
552pf_find_or_create_ruleset(const char *path)
553{
554 static char p[MAXPATHLEN];
555 char *q = NULL, *r; /* make the compiler happy */
556 struct pf_ruleset *ruleset;
557 struct pf_anchor *anchor = NULL, *dup, *parent = NULL;
558
559 while (*path == '/')
560 path++;
561 ruleset = pf_find_ruleset(path);
562 if (ruleset != NULL)
563 return (ruleset);
564 strlcpy(p, path, sizeof(p));
565#ifdef __FreeBSD__
566 while (parent == NULL && (q = rindex(p, '/')) != NULL) {
567#else
568 while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
569#endif
570 *q = 0;
571 if ((ruleset = pf_find_ruleset(p)) != NULL) {
572 parent = ruleset->anchor;
573 break;
574 }
575 }
576 if (q == NULL)
577 q = p;
578 else
579 q++;
580 strlcpy(p, path, sizeof(p));
581 if (!*q)
582 return (NULL);
583#ifdef __FreeBSD__
584 while ((r = index(q, '/')) != NULL || *q) {
585#else
586 while ((r = strchr(q, '/')) != NULL || *q) {
587#endif
588 if (r != NULL)
589 *r = 0;
590 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
591 (parent != NULL && strlen(parent->path) >=
592 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
593 return (NULL);
594 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
595 M_NOWAIT);
596 if (anchor == NULL)
597 return (NULL);
598 memset(anchor, 0, sizeof(*anchor));
599 RB_INIT(&anchor->children);
600 strlcpy(anchor->name, q, sizeof(anchor->name));
601 if (parent != NULL) {
602 strlcpy(anchor->path, parent->path,
603 sizeof(anchor->path));
604 strlcat(anchor->path, "/", sizeof(anchor->path));
605 }
606 strlcat(anchor->path, anchor->name, sizeof(anchor->path));
607 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
608 NULL) {
609 printf("pf_find_or_create_ruleset: RB_INSERT1 "
610 "'%s' '%s' collides with '%s' '%s'\n",
611 anchor->path, anchor->name, dup->path, dup->name);
612 free(anchor, M_TEMP);
613 return (NULL);
614 }
615 if (parent != NULL) {
616 anchor->parent = parent;
617 if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
618 anchor)) != NULL) {
619 printf("pf_find_or_create_ruleset: "
620 "RB_INSERT2 '%s' '%s' collides with "
621 "'%s' '%s'\n", anchor->path, anchor->name,
622 dup->path, dup->name);
623 RB_REMOVE(pf_anchor_global, &pf_anchors,
624 anchor);
625 free(anchor, M_TEMP);
626 return (NULL);
627 }
628 }
629 pf_init_ruleset(&anchor->ruleset);
630 anchor->ruleset.anchor = anchor;
631 parent = anchor;
632 if (r != NULL)
633 q = r + 1;
634 else
635 *q = 0;
636 }
637 return (&anchor->ruleset);
638}
639
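/*
 * Walk up from the given ruleset and free each anchor that holds no
 * rules, tables, children or references; stop at the main ruleset or
 * at the first ancestor that is still in use.
 */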
640void
641pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
642{
643 struct pf_anchor *parent;
644 int i;
645
646 while (ruleset != NULL) {
647 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
648 !RB_EMPTY(&ruleset->anchor->children) ||
649 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
650 ruleset->topen)
651 return;
652 for (i = 0; i < PF_RULESET_MAX; ++i)
653 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
654 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
655 ruleset->rules[i].inactive.open)
656 return;
657 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
658 if ((parent = ruleset->anchor->parent) != NULL)
659 RB_REMOVE(pf_anchor_node, &parent->children,
660 ruleset->anchor);
661 free(ruleset->anchor, M_TEMP);
662 if (parent == NULL)
663 return;
664 ruleset = &parent->ruleset;
665 }
666}
667
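/*
 * Resolve a rule's anchor call name (absolute, relative, "..", or
 * trailing wildcard) to an anchor, creating it if necessary, and take
 * a reference on it.  Returns 0 on success, 1 on failure.
 */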
668int
669pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
670 const char *name)
671{
672 static char *p, path[MAXPATHLEN];
673 struct pf_ruleset *ruleset;
674
675 r->anchor = NULL;
676 r->anchor_relative = 0;
677 r->anchor_wildcard = 0;
678 if (!name[0])
679 return (0);
680 if (name[0] == '/')
681 strlcpy(path, name + 1, sizeof(path));
682 else {
683 /* relative path */
684 r->anchor_relative = 1;
685 if (s->anchor == NULL || !s->anchor->path[0])
686 path[0] = 0;
687 else
688 strlcpy(path, s->anchor->path, sizeof(path));
689 while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
690 if (!path[0]) {
691 printf("pf_anchor_setup: .. beyond root\n");
692 return (1);
693 }
694#ifdef __FreeBSD__
695 if ((p = rindex(path, '/')) != NULL)
696#else
697 if ((p = strrchr(path, '/')) != NULL)
698#endif
699 *p = 0;
700 else
701 path[0] = 0;
702 r->anchor_relative++;
703 name += 3;
704 }
705 if (path[0])
706 strlcat(path, "/", sizeof(path));
707 strlcat(path, name, sizeof(path));
708 }
709#ifdef __FreeBSD__
710 if ((p = rindex(path, '/')) != NULL && !strcmp(p, "/*")) {
711#else
712 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
713#endif
714 r->anchor_wildcard = 1;
715 *p = 0;
716 }
717 ruleset = pf_find_or_create_ruleset(path);
718 if (ruleset == NULL || ruleset->anchor == NULL) {
719 printf("pf_anchor_setup: ruleset\n");
720 return (1);
721 }
722 r->anchor = ruleset->anchor;
723 r->anchor->refcnt++;
724 return (0);
725}
726
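/*
 * Rebuild the anchor call string of a rule (leading "/", "../"
 * components, trailing wildcard) so userland gets back the same form
 * it loaded.  Returns 1 if the stored paths are inconsistent.
 */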
727int
728pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
729 struct pfioc_rule *pr)
730{
731 pr->anchor_call[0] = 0;
732 if (r->anchor == NULL)
733 return (0);
734 if (!r->anchor_relative) {
735 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
736 strlcat(pr->anchor_call, r->anchor->path,
737 sizeof(pr->anchor_call));
738 } else {
739 char a[MAXPATHLEN], b[MAXPATHLEN], *p;
740 int i;
741
742 if (rs->anchor == NULL)
743 a[0] = 0;
744 else
745 strlcpy(a, rs->anchor->path, sizeof(a));
746 strlcpy(b, r->anchor->path, sizeof(b));
747 for (i = 1; i < r->anchor_relative; ++i) {
748#ifdef __FreeBSD__
749 if ((p = rindex(a, '/')) == NULL)
750#else
751 if ((p = strrchr(a, '/')) == NULL)
752#endif
753 p = a;
754 *p = 0;
755 strlcat(pr->anchor_call, "../",
756 sizeof(pr->anchor_call));
757 }
758 if (strncmp(a, b, strlen(a))) {
759 printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
760 return (1);
761 }
762 if (strlen(b) > strlen(a))
763 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
764 sizeof(pr->anchor_call));
765 }
766 if (r->anchor_wildcard)
767 strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
768 sizeof(pr->anchor_call));
769 return (0);
770}
771
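/*
 * Drop a rule's reference on its anchor and garbage-collect the
 * anchor's ruleset once the last reference is gone.
 */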
772void
773pf_anchor_remove(struct pf_rule *r)
774{
775 if (r->anchor == NULL)
776 return;
777 if (r->anchor->refcnt <= 0) {
778 printf("pf_anchor_remove: broken refcount\n");
779 r->anchor = NULL;
780 return;
781 }
782 if (!--r->anchor->refcnt)
783 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
784 r->anchor = NULL;
785}
786
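/* Move every pool address from poola to the tail of poolb. */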
787void
788pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
789{
790 struct pf_pooladdr *mv_pool_pa;
791
792 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
793 TAILQ_REMOVE(poola, mv_pool_pa, entries);
794 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
795 }
796}
797
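/*
 * Detach the dynamic address, table and interface bindings of every
 * pool address in the list and free the entries.
 */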
798void
799pf_empty_pool(struct pf_palist *poola)
800{
801 struct pf_pooladdr *empty_pool_pa;
802
803 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
804 pfi_dynaddr_remove(&empty_pool_pa->addr);
805 pf_tbladdr_remove(&empty_pool_pa->addr);
806 pfi_detach_rule(empty_pool_pa->kif);
807 TAILQ_REMOVE(poola, empty_pool_pa, entries);
808 pool_put(&pf_pooladdr_pl, empty_pool_pa);
809 }
810}
811
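/*
 * Unlink a rule from its queue (if one is given) and, once no states
 * or source nodes reference it, release its tags, queue ids, route
 * labels, addresses, overload table, interface, anchor and address
 * pool before returning it to the rule pool.
 */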
812void
813pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
814{
815 if (rulequeue != NULL) {
816 if (rule->states <= 0) {
817 /*
818 * XXX - we need to remove the table *before* detaching
819 * the rule to make sure the table code does not delete
820 * the anchor under our feet.
821 */
822 pf_tbladdr_remove(&rule->src.addr);
823 pf_tbladdr_remove(&rule->dst.addr);
824 if (rule->overload_tbl)
825 pfr_detach_table(rule->overload_tbl);
826 }
827 TAILQ_REMOVE(rulequeue, rule, entries);
828 rule->entries.tqe_prev = NULL;
829 rule->nr = -1;
830 }
831
832 if (rule->states > 0 || rule->src_nodes > 0 ||
833 rule->entries.tqe_prev != NULL)
834 return;
835 pf_tag_unref(rule->tag);
836 pf_tag_unref(rule->match_tag);
837#ifdef ALTQ
838 if (rule->pqid != rule->qid)
839 pf_qid_unref(rule->pqid);
840 pf_qid_unref(rule->qid);
841#endif
842 pf_rtlabel_remove(&rule->src.addr);
843 pf_rtlabel_remove(&rule->dst.addr);
844 pfi_dynaddr_remove(&rule->src.addr);
845 pfi_dynaddr_remove(&rule->dst.addr);
846 if (rulequeue == NULL) {
847 pf_tbladdr_remove(&rule->src.addr);
848 pf_tbladdr_remove(&rule->dst.addr);
849 if (rule->overload_tbl)
850 pfr_detach_table(rule->overload_tbl);
851 }
852 pfi_detach_rule(rule->kif);
853 pf_anchor_remove(rule);
854 pf_empty_pool(&rule->rpool.list);
855 pool_put(&pf_rule_pl, rule);
856}
857
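/*
 * Return the numeric tag for a name, taking a reference; if the name
 * is new, allocate the lowest free tag id (0 on failure).
 */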
858static u_int16_t
859tagname2tag(struct pf_tags *head, char *tagname)
860{
861 struct pf_tagname *tag, *p = NULL;
862 u_int16_t new_tagid = 1;
863
864 TAILQ_FOREACH(tag, head, entries)
865 if (strcmp(tagname, tag->name) == 0) {
866 tag->ref++;
867 return (tag->tag);
868 }
869
870 /*
871 * To avoid fragmentation, do a linear search from the beginning and
872 * take the first free tag id we find.  If there is none, or the
873 * list is empty, append a new entry at the end.
874 */
875
876 /* new entry */
877 if (!TAILQ_EMPTY(head))
878 for (p = TAILQ_FIRST(head); p != NULL &&
879 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
880 new_tagid = p->tag + 1;
881
882 if (new_tagid > TAGID_MAX)
883 return (0);
884
885 /* allocate and fill new struct pf_tagname */
886 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
887 M_TEMP, M_NOWAIT);
888 if (tag == NULL)
889 return (0);
890 bzero(tag, sizeof(struct pf_tagname));
891 strlcpy(tag->name, tagname, sizeof(tag->name));
892 tag->tag = new_tagid;
893 tag->ref++;
894
895 if (p != NULL) /* insert new entry before p */
896 TAILQ_INSERT_BEFORE(p, tag, entries);
897 else /* either list empty or no free slot in between */
898 TAILQ_INSERT_TAIL(head, tag, entries);
899
900 return (tag->tag);
901}
902
903static void
904tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
905{
906 struct pf_tagname *tag;
907
908 TAILQ_FOREACH(tag, head, entries)
909 if (tag->tag == tagid) {
910 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
911 return;
912 }
913}
914
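/*
 * Drop one reference on a tag and free its entry when the count
 * reaches zero.
 */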
915static void
916tag_unref(struct pf_tags *head, u_int16_t tag)
917{
918 struct pf_tagname *p, *next;
919
920 if (tag == 0)
921 return;
922
923 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
924 next = TAILQ_NEXT(p, entries);
925 if (tag == p->tag) {
926 if (--p->ref == 0) {
927 TAILQ_REMOVE(head, p, entries);
928 free(p, M_TEMP);
929 }
930 break;
931 }
932 }
933}
934
935u_int16_t
936pf_tagname2tag(char *tagname)
937{
938 return (tagname2tag(&pf_tags, tagname));
939}
940
941void
942pf_tag2tagname(u_int16_t tagid, char *p)
943{
944 return (tag2tagname(&pf_tags, tagid, p));
945}
946
947void
948pf_tag_ref(u_int16_t tag)
949{
950 struct pf_tagname *t;
951
952 TAILQ_FOREACH(t, &pf_tags, entries)
953 if (t->tag == tag)
954 break;
955 if (t != NULL)
956 t->ref++;
957}
958
959void
960pf_tag_unref(u_int16_t tag)
961{
962 return (tag_unref(&pf_tags, tag));
963}
964
965int
966pf_rtlabel_add(struct pf_addr_wrap *a)
967{
968#ifdef __FreeBSD__
969 /* XXX_IMPORT: later */
970 return (0);
971#else
972 if (a->type == PF_ADDR_RTLABEL &&
973 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
974 return (-1);
975 return (0);
976#endif
977}
978
979void
980pf_rtlabel_remove(struct pf_addr_wrap *a)
981{
982#ifdef __FreeBSD__
983 /* XXX_IMPORT: later */
984#else
985 if (a->type == PF_ADDR_RTLABEL)
986 rtlabel_unref(a->v.rtlabel);
987#endif
988}
989
990void
991pf_rtlabel_copyout(struct pf_addr_wrap *a)
992{
993#ifdef __FreeBSD__
994 /* XXX_IMPORT: later */
995 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
996 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
997#else
998 const char *name;
999
1000 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
1001 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
1002 strlcpy(a->v.rtlabelname, "?",
1003 sizeof(a->v.rtlabelname));
1004 else
1005 strlcpy(a->v.rtlabelname, name,
1006 sizeof(a->v.rtlabelname));
1007 }
1008#endif
1009}
1010
1011#ifdef ALTQ
1012u_int32_t
1013pf_qname2qid(char *qname)
1014{
1015 return ((u_int32_t)tagname2tag(&pf_qids, qname));
1016}
1017
1018void
1019pf_qid2qname(u_int32_t qid, char *p)
1020{
1021 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
1022}
1023
1024void
1025pf_qid_unref(u_int32_t qid)
1026{
1027 return (tag_unref(&pf_qids, (u_int16_t)qid));
1028}
1029
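/*
 * Start an ALTQ transaction: purge the inactive altq list, hand out a
 * new ticket and mark the inactive list open.
 */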
1030int
1031pf_begin_altq(u_int32_t *ticket)
1032{
1033 struct pf_altq *altq;
1034 int error = 0;
1035
1036 /* Purge the old altq list */
1037 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1038 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1039 if (altq->qname[0] == 0) {
1040 /* detach and destroy the discipline */
1041 error = altq_remove(altq);
1042 } else
1043 pf_qid_unref(altq->qid);
1044 pool_put(&pf_altq_pl, altq);
1045 }
1046 if (error)
1047 return (error);
1048 *ticket = ++ticket_altqs_inactive;
1049 altqs_inactive_open = 1;
1050 return (0);
1051}
1052
1053int
1054pf_rollback_altq(u_int32_t ticket)
1055{
1056 struct pf_altq *altq;
1057 int error = 0;
1058
1059 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
1060 return (0);
1061 /* Purge the old altq list */
1062 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1063 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1064 if (altq->qname[0] == 0) {
1065 /* detach and destroy the discipline */
1066 error = altq_remove(altq);
1067 } else
1068 pf_qid_unref(altq->qid);
1069 pool_put(&pf_altq_pl, altq);
1070 }
1071 altqs_inactive_open = 0;
1072 return (error);
1073}
1074
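/*
 * Commit an ALTQ transaction: swap the inactive and active altq lists
 * at splsoftnet, attach (and, if ALTQ is running, enable) the new
 * disciplines, then detach and destroy the old ones.
 */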
1075int
1076pf_commit_altq(u_int32_t ticket)
1077{
1078 struct pf_altqqueue *old_altqs;
1079 struct pf_altq *altq;
1080 int s, err, error = 0;
1081
1082 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
1083 return (EBUSY);
1084
1085 /* swap altqs, keep the old. */
1086 s = splsoftnet();
1087 old_altqs = pf_altqs_active;
1088 pf_altqs_active = pf_altqs_inactive;
1089 pf_altqs_inactive = old_altqs;
1090 ticket_altqs_active = ticket_altqs_inactive;
1091
1092 /* Attach new disciplines */
1093 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1094 if (altq->qname[0] == 0) {
1095 /* attach the discipline */
1096 error = altq_pfattach(altq);
1097 if (error == 0 && pf_altq_running)
1098 error = pf_enable_altq(altq);
1099 if (error != 0) {
1100 splx(s);
1101 return (error);
1102 }
1103 }
1104 }
1105
1106 /* Purge the old altq list */
1107 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1108 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1109 if (altq->qname[0] == 0) {
1110 /* detach and destroy the discipline */
1111 if (pf_altq_running)
1112 error = pf_disable_altq(altq);
1113 err = altq_pfdetach(altq);
1114 if (err != 0 && error == 0)
1115 error = err;
1116 err = altq_remove(altq);
1117 if (err != 0 && error == 0)
1118 error = err;
1119 } else
1120 pf_qid_unref(altq->qid);
1121 pool_put(&pf_altq_pl, altq);
1122 }
1123 splx(s);
1124
1125 altqs_inactive_open = 0;
1126 return (error);
1127}
1128
1129int
1130pf_enable_altq(struct pf_altq *altq)
1131{
1132 struct ifnet *ifp;
1133 struct tb_profile tb;
1134 int s, error = 0;
1135
1136 if ((ifp = ifunit(altq->ifname)) == NULL)
1137 return (EINVAL);
1138
1139 if (ifp->if_snd.altq_type != ALTQT_NONE)
1140 error = altq_enable(&ifp->if_snd);
1141
1142 /* set tokenbucket regulator */
1143 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1144 tb.rate = altq->ifbandwidth;
1145 tb.depth = altq->tbrsize;
1146 s = splimp();
1147#ifdef __FreeBSD__
1148 PF_UNLOCK();
1149#endif
1150 error = tbr_set(&ifp->if_snd, &tb);
1151#ifdef __FreeBSD__
1152 PF_LOCK();
1153#endif
1154 splx(s);
1155 }
1156
1157 return (error);
1158}
1159
1160int
1161pf_disable_altq(struct pf_altq *altq)
1162{
1163 struct ifnet *ifp;
1164 struct tb_profile tb;
1165 int s, error;
1166
1167 if ((ifp = ifunit(altq->ifname)) == NULL)
1168 return (EINVAL);
1169
1170 /*
1171 * If the discipline attached to the interface is no longer ours,
1172 * it has been overridden by a new one; there is nothing to disable.
1173 */
1174 if (altq->altq_disc != ifp->if_snd.altq_disc)
1175 return (0);
1176
1177 error = altq_disable(&ifp->if_snd);
1178
1179 if (error == 0) {
1180 /* clear tokenbucket regulator */
1181 tb.rate = 0;
1182 s = splimp();
1183#ifdef __FreeBSD__
1184 PF_UNLOCK();
1185#endif
1186 error = tbr_set(&ifp->if_snd, &tb);
1187#ifdef __FreeBSD__
1188 PF_LOCK();
1189#endif
1190 splx(s);
1191 }
1192
1193 return (error);
1194}
1195#endif /* ALTQ */
1196
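/*
 * Start a ruleset transaction: flush the inactive rule queue for the
 * given anchor and ruleset type, hand out a new ticket and mark the
 * inactive queue open.
 */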
1197int
1198pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1199{
1200 struct pf_ruleset *rs;
1201 struct pf_rule *rule;
1202
1203 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1204 return (EINVAL);
1205 rs = pf_find_or_create_ruleset(anchor);
1206 if (rs == NULL)
1207 return (EINVAL);
1208 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1209 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1210 *ticket = ++rs->rules[rs_num].inactive.ticket;
1211 rs->rules[rs_num].inactive.open = 1;
1212 return (0);
1213}
1214
1215int
1216pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1217{
1218 struct pf_ruleset *rs;
1219 struct pf_rule *rule;
1220
1221 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1222 return (EINVAL);
1223 rs = pf_find_ruleset(anchor);
1224 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1225 rs->rules[rs_num].inactive.ticket != ticket)
1226 return (0);
1227 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1228 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1229 rs->rules[rs_num].inactive.open = 0;
1230 return (0);
1231}
1232
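/*
 * Commit a ruleset transaction: if the ticket matches, swap the
 * inactive and active rule queues at splsoftnet, recalculate the skip
 * steps and purge the now-inactive old rules.
 */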
1233int
1234pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1235{
1236 struct pf_ruleset *rs;
1237 struct pf_rule *rule;
1238 struct pf_rulequeue *old_rules;
1239 int s;
1240
1241 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1242 return (EINVAL);
1243 rs = pf_find_ruleset(anchor);
1244 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1245 ticket != rs->rules[rs_num].inactive.ticket)
1246 return (EBUSY);
1247
1248 /* Swap rules, keep the old. */
1249 s = splsoftnet();
1250 old_rules = rs->rules[rs_num].active.ptr;
1251 rs->rules[rs_num].active.ptr =
1252 rs->rules[rs_num].inactive.ptr;
1253 rs->rules[rs_num].inactive.ptr = old_rules;
1254 rs->rules[rs_num].active.ticket =
1255 rs->rules[rs_num].inactive.ticket;
1256 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1257
1258 /* Purge the old rule list. */
1259 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1260 pf_rm_rule(old_rules, rule);
1261 rs->rules[rs_num].inactive.open = 0;
1262 pf_remove_if_empty_ruleset(rs);
1263 splx(s);
1264 return (0);
1265}
1266
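/*
 * Main /dev/pf ioctl handler.  The two switch statements below first
 * restrict which commands are permitted at raised securelevel and on
 * descriptors opened without FWRITE; the command itself is then
 * dispatched under the pf lock (FreeBSD) or at splsoftnet (OpenBSD).
 */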
1267#ifdef __FreeBSD__
1268int
1269pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1270#else
1271int
1272pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1273#endif
1274{
1275 struct pf_pooladdr *pa = NULL;
1276 struct pf_pool *pool = NULL;
1277#ifndef __FreeBSD__
1278 int s;
1279#endif
1280 int error = 0;
1281
1282 /* XXX keep in sync with switch() below */
1283#ifdef __FreeBSD__
1284 if (securelevel_gt(td->td_ucred, 2))
1285#else
1286 if (securelevel > 1)
1287#endif
1288 switch (cmd) {
1289 case DIOCGETRULES:
1290 case DIOCGETRULE:
1291 case DIOCGETADDRS:
1292 case DIOCGETADDR:
1293 case DIOCGETSTATE:
1294 case DIOCSETSTATUSIF:
1295 case DIOCGETSTATUS:
1296 case DIOCCLRSTATUS:
1297 case DIOCNATLOOK:
1298 case DIOCSETDEBUG:
1299 case DIOCGETSTATES:
1300 case DIOCGETTIMEOUT:
1301 case DIOCCLRRULECTRS:
1302 case DIOCGETLIMIT:
1303 case DIOCGETALTQS:
1304 case DIOCGETALTQ:
1305 case DIOCGETQSTATS:
1306 case DIOCGETRULESETS:
1307 case DIOCGETRULESET:
1308 case DIOCRGETTABLES:
1309 case DIOCRGETTSTATS:
1310 case DIOCRCLRTSTATS:
1311 case DIOCRCLRADDRS:
1312 case DIOCRADDADDRS:
1313 case DIOCRDELADDRS:
1314 case DIOCRSETADDRS:
1315 case DIOCRGETADDRS:
1316 case DIOCRGETASTATS:
1317 case DIOCRCLRASTATS:
1318 case DIOCRTSTADDRS:
1319 case DIOCOSFPGET:
1320 case DIOCGETSRCNODES:
1321 case DIOCCLRSRCNODES:
1322 case DIOCIGETIFACES:
1323 case DIOCICLRISTATS:
1324#ifdef __FreeBSD__
1325 case DIOCGIFSPEED:
1326#endif
1327 case DIOCSETIFFLAG:
1328 case DIOCCLRIFFLAG:
1329 break;
1330 case DIOCRCLRTABLES:
1331 case DIOCRADDTABLES:
1332 case DIOCRDELTABLES:
1333 case DIOCRSETTFLAGS:
1334 if (((struct pfioc_table *)addr)->pfrio_flags &
1335 PFR_FLAG_DUMMY)
1336 break; /* dummy operation ok */
1337 return (EPERM);
1338 default:
1339 return (EPERM);
1340 }
1341
1342 if (!(flags & FWRITE))
1343 switch (cmd) {
1344 case DIOCGETRULES:
1345 case DIOCGETRULE:
1346 case DIOCGETADDRS:
1347 case DIOCGETADDR:
1348 case DIOCGETSTATE:
1349 case DIOCGETSTATUS:
1350 case DIOCGETSTATES:
1351 case DIOCGETTIMEOUT:
1352 case DIOCGETLIMIT:
1353 case DIOCGETALTQS:
1354 case DIOCGETALTQ:
1355 case DIOCGETQSTATS:
1356 case DIOCGETRULESETS:
1357 case DIOCGETRULESET:
1358 case DIOCRGETTABLES:
1359 case DIOCRGETTSTATS:
1360 case DIOCRGETADDRS:
1361 case DIOCRGETASTATS:
1362 case DIOCRTSTADDRS:
1363 case DIOCOSFPGET:
1364 case DIOCGETSRCNODES:
1365 case DIOCIGETIFACES:
1366#ifdef __FreeBSD__
1367 case DIOCGIFSPEED:
1368#endif
1369 break;
1370 case DIOCRCLRTABLES:
1371 case DIOCRADDTABLES:
1372 case DIOCRDELTABLES:
1373 case DIOCRCLRTSTATS:
1374 case DIOCRCLRADDRS:
1375 case DIOCRADDADDRS:
1376 case DIOCRDELADDRS:
1377 case DIOCRSETADDRS:
1378 case DIOCRSETTFLAGS:
1379 if (((struct pfioc_table *)addr)->pfrio_flags &
1380 PFR_FLAG_DUMMY)
1381 break; /* dummy operation ok */
1382 return (EACCES);
1383 default:
1384 return (EACCES);
1385 }
1386
1387#ifdef __FreeBSD__
1388 PF_LOCK();
1389#else
1390 s = splsoftnet();
1391#endif
1392 switch (cmd) {
1393
1394 case DIOCSTART:
1395 if (pf_status.running)
1396 error = EEXIST;
1397 else {
1398#ifdef __FreeBSD__
1399 PF_UNLOCK();
1400 error = hook_pf();
1401 PF_LOCK();
1402 if (error) {
1403 DPFPRINTF(PF_DEBUG_MISC,
1404 ("pf: pfil registration failed\n"));
1405 break;
1406 }
1407#endif
1408 pf_status.running = 1;
1409 pf_status.since = time_second;
1410 if (pf_status.stateid == 0) {
1411 pf_status.stateid = time_second;
1412 pf_status.stateid = pf_status.stateid << 32;
1413 }
1414 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1415 }
1416 break;
1417
1418 case DIOCSTOP:
1419 if (!pf_status.running)
1420 error = ENOENT;
1421 else {
1422 pf_status.running = 0;
1423#ifdef __FreeBSD__
1424 PF_UNLOCK();
1425 error = dehook_pf();
1426 PF_LOCK();
1427 if (error) {
1428 pf_status.running = 1;
1429 DPFPRINTF(PF_DEBUG_MISC,
1430 ("pf: pfil unregistration failed\n"));
1431 }
1432#endif
1433 pf_status.since = time_second;
1434 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1435 }
1436 break;
1437
1438 case DIOCADDRULE: {
1439 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1440 struct pf_ruleset *ruleset;
1441 struct pf_rule *rule, *tail;
1442 struct pf_pooladdr *pa;
1443 int rs_num;
1444
1445 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1446 ruleset = pf_find_ruleset(pr->anchor);
1447 if (ruleset == NULL) {
1448 error = EINVAL;
1449 break;
1450 }
1451 rs_num = pf_get_ruleset_number(pr->rule.action);
1452 if (rs_num >= PF_RULESET_MAX) {
1453 error = EINVAL;
1454 break;
1455 }
1456 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1457 error = EINVAL;
1458 break;
1459 }
1460 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1461 printf("ticket: %d != [%d]%d\n", pr->ticket,
1462 rs_num, ruleset->rules[rs_num].inactive.ticket);
1463 error = EBUSY;
1464 break;
1465 }
1466 if (pr->pool_ticket != ticket_pabuf) {
1467 printf("pool_ticket: %d != %d\n", pr->pool_ticket,
1468 ticket_pabuf);
1469 error = EBUSY;
1470 break;
1471 }
1472 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1473 if (rule == NULL) {
1474 error = ENOMEM;
1475 break;
1476 }
1477 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1478 rule->anchor = NULL;
1479 rule->kif = NULL;
1480 TAILQ_INIT(&rule->rpool.list);
1481 /* initialize refcounting */
1482 rule->states = 0;
1483 rule->src_nodes = 0;
1484 rule->entries.tqe_prev = NULL;
1485#ifndef INET
1486 if (rule->af == AF_INET) {
1487 pool_put(&pf_rule_pl, rule);
1488 error = EAFNOSUPPORT;
1489 break;
1490 }
1491#endif /* INET */
1492#ifndef INET6
1493 if (rule->af == AF_INET6) {
1494 pool_put(&pf_rule_pl, rule);
1495 error = EAFNOSUPPORT;
1496 break;
1497 }
1498#endif /* INET6 */
1499 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1500 pf_rulequeue);
1501 if (tail)
1502 rule->nr = tail->nr + 1;
1503 else
1504 rule->nr = 0;
1505 if (rule->ifname[0]) {
1506 rule->kif = pfi_attach_rule(rule->ifname);
1507 if (rule->kif == NULL) {
1508 pool_put(&pf_rule_pl, rule);
1509 error = EINVAL;
1510 break;
1511 }
1512 }
1513
1514#ifdef ALTQ
1515 /* set queue IDs */
1516 if (rule->qname[0] != 0) {
1517 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1518 error = EBUSY;
1519 else if (rule->pqname[0] != 0) {
1520 if ((rule->pqid =
1521 pf_qname2qid(rule->pqname)) == 0)
1522 error = EBUSY;
1523 } else
1524 rule->pqid = rule->qid;
1525 }
1526#endif
1527 if (rule->tagname[0])
1528 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1529 error = EBUSY;
1530 if (rule->match_tagname[0])
1531 if ((rule->match_tag =
1532 pf_tagname2tag(rule->match_tagname)) == 0)
1533 error = EBUSY;
1534 if (rule->rt && !rule->direction)
1535 error = EINVAL;
1536 if (pf_rtlabel_add(&rule->src.addr) ||
1537 pf_rtlabel_add(&rule->dst.addr))
1538 error = EBUSY;
1539 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1540 error = EINVAL;
1541 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1542 error = EINVAL;
1543 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1544 error = EINVAL;
1545 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1546 error = EINVAL;
1547 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1548 error = EINVAL;
1549 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1550 if (pf_tbladdr_setup(ruleset, &pa->addr))
1551 error = EINVAL;
1552
1553 if (rule->overload_tblname[0]) {
1554 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1555 rule->overload_tblname)) == NULL)
1556 error = EINVAL;
1557 else
1558 rule->overload_tbl->pfrkt_flags |=
1559 PFR_TFLAG_ACTIVE;
1560 }
1561
1562 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1563 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1564 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1565 (rule->rt > PF_FASTROUTE)) &&
1566 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1567 error = EINVAL;
1568
1569 if (error) {
1570 pf_rm_rule(NULL, rule);
1571 break;
1572 }
1573 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1574 rule->evaluations = rule->packets = rule->bytes = 0;
1575 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1576 rule, entries);
1577 break;
1578 }
1579
1580 case DIOCGETRULES: {
1581 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1582 struct pf_ruleset *ruleset;
1583 struct pf_rule *tail;
1584 int rs_num;
1585
1586 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1587 ruleset = pf_find_ruleset(pr->anchor);
1588 if (ruleset == NULL) {
1589 error = EINVAL;
1590 break;
1591 }
1592 rs_num = pf_get_ruleset_number(pr->rule.action);
1593 if (rs_num >= PF_RULESET_MAX) {
1594 error = EINVAL;
1595 break;
1596 }
1597 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1598 pf_rulequeue);
1599 if (tail)
1600 pr->nr = tail->nr + 1;
1601 else
1602 pr->nr = 0;
1603 pr->ticket = ruleset->rules[rs_num].active.ticket;
1604 break;
1605 }
1606
1607 case DIOCGETRULE: {
1608 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1609 struct pf_ruleset *ruleset;
1610 struct pf_rule *rule;
1611 int rs_num, i;
1612
1613 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1614 ruleset = pf_find_ruleset(pr->anchor);
1615 if (ruleset == NULL) {
1616 error = EINVAL;
1617 break;
1618 }
1619 rs_num = pf_get_ruleset_number(pr->rule.action);
1620 if (rs_num >= PF_RULESET_MAX) {
1621 error = EINVAL;
1622 break;
1623 }
1624 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1625 error = EBUSY;
1626 break;
1627 }
1628 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1629 while ((rule != NULL) && (rule->nr != pr->nr))
1630 rule = TAILQ_NEXT(rule, entries);
1631 if (rule == NULL) {
1632 error = EBUSY;
1633 break;
1634 }
1635 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1636 if (pf_anchor_copyout(ruleset, rule, pr)) {
1637 error = EBUSY;
1638 break;
1639 }
1640 pfi_dynaddr_copyout(&pr->rule.src.addr);
1641 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1642 pf_tbladdr_copyout(&pr->rule.src.addr);
1643 pf_tbladdr_copyout(&pr->rule.dst.addr);
1644 pf_rtlabel_copyout(&pr->rule.src.addr);
1645 pf_rtlabel_copyout(&pr->rule.dst.addr);
1646 for (i = 0; i < PF_SKIP_COUNT; ++i)
1647 if (rule->skip[i].ptr == NULL)
1648 pr->rule.skip[i].nr = -1;
1649 else
1650 pr->rule.skip[i].nr =
1651 rule->skip[i].ptr->nr;
1652 break;
1653 }
1654
1655 case DIOCCHANGERULE: {
1656 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1657 struct pf_ruleset *ruleset;
1658 struct pf_rule *oldrule = NULL, *newrule = NULL;
1659 u_int32_t nr = 0;
1660 int rs_num;
1661
1662 if (!(pcr->action == PF_CHANGE_REMOVE ||
1663 pcr->action == PF_CHANGE_GET_TICKET) &&
1664 pcr->pool_ticket != ticket_pabuf) {
1665 error = EBUSY;
1666 break;
1667 }
1668
1669 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1670 pcr->action > PF_CHANGE_GET_TICKET) {
1671 error = EINVAL;
1672 break;
1673 }
1674 ruleset = pf_find_ruleset(pcr->anchor);
1675 if (ruleset == NULL) {
1676 error = EINVAL;
1677 break;
1678 }
1679 rs_num = pf_get_ruleset_number(pcr->rule.action);
1680 if (rs_num >= PF_RULESET_MAX) {
1681 error = EINVAL;
1682 break;
1683 }
1684
1685 if (pcr->action == PF_CHANGE_GET_TICKET) {
1686 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1687 break;
1688 } else {
1689 if (pcr->ticket !=
1690 ruleset->rules[rs_num].active.ticket) {
1691 error = EINVAL;
1692 break;
1693 }
1694 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1695 error = EINVAL;
1696 break;
1697 }
1698 }
1699
1700 if (pcr->action != PF_CHANGE_REMOVE) {
1701 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1702 if (newrule == NULL) {
1703 error = ENOMEM;
1704 break;
1705 }
1706 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1707 TAILQ_INIT(&newrule->rpool.list);
1708 /* initialize refcounting */
1709 newrule->states = 0;
1710 newrule->entries.tqe_prev = NULL;
1711#ifndef INET
1712 if (newrule->af == AF_INET) {
1713 pool_put(&pf_rule_pl, newrule);
1714 error = EAFNOSUPPORT;
1715 break;
1716 }
1717#endif /* INET */
1718#ifndef INET6
1719 if (newrule->af == AF_INET6) {
1720 pool_put(&pf_rule_pl, newrule);
1721 error = EAFNOSUPPORT;
1722 break;
1723 }
1724#endif /* INET6 */
1725 if (newrule->ifname[0]) {
1726 newrule->kif = pfi_attach_rule(newrule->ifname);
1727 if (newrule->kif == NULL) {
1728 pool_put(&pf_rule_pl, newrule);
1729 error = EINVAL;
1730 break;
1731 }
1732 } else
1733 newrule->kif = NULL;
1734
1735#ifdef ALTQ
1736 /* set queue IDs */
1737 if (newrule->qname[0] != 0) {
1738 if ((newrule->qid =
1739 pf_qname2qid(newrule->qname)) == 0)
1740 error = EBUSY;
1741 else if (newrule->pqname[0] != 0) {
1742 if ((newrule->pqid =
1743 pf_qname2qid(newrule->pqname)) == 0)
1744 error = EBUSY;
1745 } else
1746 newrule->pqid = newrule->qid;
1747 }
1748#endif /* ALTQ */
1749 if (newrule->tagname[0])
1750 if ((newrule->tag =
1751 pf_tagname2tag(newrule->tagname)) == 0)
1752 error = EBUSY;
1753 if (newrule->match_tagname[0])
1754 if ((newrule->match_tag = pf_tagname2tag(
1755 newrule->match_tagname)) == 0)
1756 error = EBUSY;
1757 if (newrule->rt && !newrule->direction)
1758 error = EINVAL;
1759 if (pf_rtlabel_add(&newrule->src.addr) ||
1760 pf_rtlabel_add(&newrule->dst.addr))
1761 error = EBUSY;
1762 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1763 error = EINVAL;
1764 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1765 error = EINVAL;
1766 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1767 error = EINVAL;
1768 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1769 error = EINVAL;
1770 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1771 error = EINVAL;
1772
1773 if (newrule->overload_tblname[0]) {
1774 if ((newrule->overload_tbl = pfr_attach_table(
1775 ruleset, newrule->overload_tblname)) ==
1776 NULL)
1777 error = EINVAL;
1778 else
1779 newrule->overload_tbl->pfrkt_flags |=
1780 PFR_TFLAG_ACTIVE;
1781 }
1782
1783 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1784 if (((((newrule->action == PF_NAT) ||
1785 (newrule->action == PF_RDR) ||
1786 (newrule->action == PF_BINAT) ||
1787 (newrule->rt > PF_FASTROUTE)) &&
1788 !pcr->anchor[0])) &&
1789 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1790 error = EINVAL;
1791
1792 if (error) {
1793 pf_rm_rule(NULL, newrule);
1794 break;
1795 }
1796 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1797 newrule->evaluations = newrule->packets = 0;
1798 newrule->bytes = 0;
1799 }
1800 pf_empty_pool(&pf_pabuf);
1801
1802 if (pcr->action == PF_CHANGE_ADD_HEAD)
1803 oldrule = TAILQ_FIRST(
1804 ruleset->rules[rs_num].active.ptr);
1805 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1806 oldrule = TAILQ_LAST(
1807 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1808 else {
1809 oldrule = TAILQ_FIRST(
1810 ruleset->rules[rs_num].active.ptr);
1811 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1812 oldrule = TAILQ_NEXT(oldrule, entries);
1813 if (oldrule == NULL) {
1814 if (newrule != NULL)
1815 pf_rm_rule(NULL, newrule);
1816 error = EINVAL;
1817 break;
1818 }
1819 }
1820
1821 if (pcr->action == PF_CHANGE_REMOVE)
1822 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1823 else {
1824 if (oldrule == NULL)
1825 TAILQ_INSERT_TAIL(
1826 ruleset->rules[rs_num].active.ptr,
1827 newrule, entries);
1828 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1829 pcr->action == PF_CHANGE_ADD_BEFORE)
1830 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1831 else
1832 TAILQ_INSERT_AFTER(
1833 ruleset->rules[rs_num].active.ptr,
1834 oldrule, newrule, entries);
1835 }
1836
1837 nr = 0;
1838 TAILQ_FOREACH(oldrule,
1839 ruleset->rules[rs_num].active.ptr, entries)
1840 oldrule->nr = nr++;
1841
1842 ruleset->rules[rs_num].active.ticket++;
1843
1844 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1845 pf_remove_if_empty_ruleset(ruleset);
1846
1847 break;
1848 }
1849
1850 case DIOCCLRSTATES: {
1851 struct pf_state *state;
1852 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1853 int killed = 0;
1854
1855 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1856 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1857 state->u.s.kif->pfik_name)) {
1858 state->timeout = PFTM_PURGE;
1859#if NPFSYNC
1860 /* don't send out individual delete messages */
1861 state->sync_flags = PFSTATE_NOSYNC;
1862#endif
1863 killed++;
1864 }
1865 }
1866 pf_purge_expired_states();
1867 pf_status.states = 0;
1868 psk->psk_af = killed;
1869#if NPFSYNC
1870 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1871#endif
1872 break;
1873 }
1874
1875 case DIOCKILLSTATES: {
1876 struct pf_state *state;
1877 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1878 int killed = 0;
1879
1880 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1881 if ((!psk->psk_af || state->af == psk->psk_af)
1882 && (!psk->psk_proto || psk->psk_proto ==
1883 state->proto) &&
1884 PF_MATCHA(psk->psk_src.neg,
1885 &psk->psk_src.addr.v.a.addr,
1886 &psk->psk_src.addr.v.a.mask,
1887 &state->lan.addr, state->af) &&
1888 PF_MATCHA(psk->psk_dst.neg,
1889 &psk->psk_dst.addr.v.a.addr,
1890 &psk->psk_dst.addr.v.a.mask,
1891 &state->ext.addr, state->af) &&
1892 (psk->psk_src.port_op == 0 ||
1893 pf_match_port(psk->psk_src.port_op,
1894 psk->psk_src.port[0], psk->psk_src.port[1],
1895 state->lan.port)) &&
1896 (psk->psk_dst.port_op == 0 ||
1897 pf_match_port(psk->psk_dst.port_op,
1898 psk->psk_dst.port[0], psk->psk_dst.port[1],
1899 state->ext.port)) &&
1900 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1901 state->u.s.kif->pfik_name))) {
1902 state->timeout = PFTM_PURGE;
1903 killed++;
1904 }
1905 }
1906 pf_purge_expired_states();
1907 psk->psk_af = killed;
1908 break;
1909 }
1910
1911 case DIOCADDSTATE: {
1912 struct pfioc_state *ps = (struct pfioc_state *)addr;
1913 struct pf_state *state;
1914 struct pfi_kif *kif;
1915
1916 if (ps->state.timeout >= PFTM_MAX &&
1917 ps->state.timeout != PFTM_UNTIL_PACKET) {
1918 error = EINVAL;
1919 break;
1920 }
1921 state = pool_get(&pf_state_pl, PR_NOWAIT);
1922 if (state == NULL) {
1923 error = ENOMEM;
1924 break;
1925 }
1926 kif = pfi_lookup_create(ps->state.u.ifname);
1927 if (kif == NULL) {
1928 pool_put(&pf_state_pl, state);
1929 error = ENOENT;
1930 break;
1931 }
1932 bcopy(&ps->state, state, sizeof(struct pf_state));
1933 bzero(&state->u, sizeof(state->u));
1934 state->rule.ptr = &pf_default_rule;
1935 state->nat_rule.ptr = NULL;
1936 state->anchor.ptr = NULL;
1937 state->rt_kif = NULL;
1938 state->creation = time_second;
1939 state->pfsync_time = 0;
1940 state->packets[0] = state->packets[1] = 0;
1941 state->bytes[0] = state->bytes[1] = 0;
1942
1943 if (pf_insert_state(kif, state)) {
1944 pfi_maybe_destroy(kif);
1945 pool_put(&pf_state_pl, state);
1946 error = ENOMEM;
1947 }
1948 break;
1949 }
1950
1951 case DIOCGETSTATE: {
1952 struct pfioc_state *ps = (struct pfioc_state *)addr;
1953 struct pf_state *state;
1954 u_int32_t nr;
1955
1956 nr = 0;
1957 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1958 if (nr >= ps->nr)
1959 break;
1960 nr++;
1961 }
1962 if (state == NULL) {
1963 error = EBUSY;
1964 break;
1965 }
1966 bcopy(state, &ps->state, sizeof(struct pf_state));
1967 ps->state.rule.nr = state->rule.ptr->nr;
1968 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1969 -1 : state->nat_rule.ptr->nr;
1970 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1971 -1 : state->anchor.ptr->nr;
1972 ps->state.expire = pf_state_expires(state);
1973 if (ps->state.expire > time_second)
1974 ps->state.expire -= time_second;
1975 else
1976 ps->state.expire = 0;
1977 break;
1978 }
1979
1980 case DIOCGETSTATES: {
1981 struct pfioc_states *ps = (struct pfioc_states *)addr;
1982 struct pf_state *state;
1983 struct pf_state *p, pstore;
1984 struct pfi_kif *kif;
1985 u_int32_t nr = 0;
1986 int space = ps->ps_len;
1987
1988 if (space == 0) {
1989 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1990 nr += kif->pfik_states;
1991 ps->ps_len = sizeof(struct pf_state) * nr;
1992 break;
1993 }
1994
1995 p = ps->ps_states;
1996 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1997 RB_FOREACH(state, pf_state_tree_ext_gwy,
1998 &kif->pfik_ext_gwy) {
1999 int secs = time_second;
2000
2001 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
2002 break;
2003
2004 bcopy(state, &pstore, sizeof(pstore));
2005 strlcpy(pstore.u.ifname, kif->pfik_name,
2006 sizeof(pstore.u.ifname));
2007 pstore.rule.nr = state->rule.ptr->nr;
2008 pstore.nat_rule.nr = (state->nat_rule.ptr ==
2009 NULL) ? -1 : state->nat_rule.ptr->nr;
2010 pstore.anchor.nr = (state->anchor.ptr ==
2011 NULL) ? -1 : state->anchor.ptr->nr;
2012 pstore.creation = secs - pstore.creation;
2013 pstore.expire = pf_state_expires(state);
2014 if (pstore.expire > secs)
2015 pstore.expire -= secs;
2016 else
2017 pstore.expire = 0;
2018#ifdef __FreeBSD__
2019 PF_COPYOUT(&pstore, p, sizeof(*p), error);
2020#else
2021 error = copyout(&pstore, p, sizeof(*p));
2022#endif
2023 if (error)
2024 goto fail;
2025 p++;
2026 nr++;
2027 }
2028 ps->ps_len = sizeof(struct pf_state) * nr;
2029 break;
2030 }
2031
2032 case DIOCGETSTATUS: {
2033 struct pf_status *s = (struct pf_status *)addr;
2034 bcopy(&pf_status, s, sizeof(struct pf_status));
2035 pfi_fill_oldstatus(s);
2036 break;
2037 }
2038
2039 case DIOCSETSTATUSIF: {
2040 struct pfioc_if *pi = (struct pfioc_if *)addr;
2041
2042 if (pi->ifname[0] == 0) {
2043 bzero(pf_status.ifname, IFNAMSIZ);
2044 break;
2045 }
2046 if (ifunit(pi->ifname) == NULL) {
2047 error = EINVAL;
2048 break;
2049 }
2050 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
2051 break;
2052 }
2053
2054 case DIOCCLRSTATUS: {
2055 bzero(pf_status.counters, sizeof(pf_status.counters));
2056 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
2057 bzero(pf_status.scounters, sizeof(pf_status.scounters));
2058 if (*pf_status.ifname)
2059 pfi_clr_istats(pf_status.ifname, NULL,
2060 PFI_FLAG_INSTANCE);
2061 break;
2062 }
2063
2064 case DIOCNATLOOK: {
2065 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
2066 struct pf_state *state;
2067 struct pf_state key;
2068 int m = 0, direction = pnl->direction;
2069
2070 key.af = pnl->af;
2071 key.proto = pnl->proto;
2072
2073 if (!pnl->proto ||
2074 PF_AZERO(&pnl->saddr, pnl->af) ||
2075 PF_AZERO(&pnl->daddr, pnl->af) ||
2076 !pnl->dport || !pnl->sport)
2077 error = EINVAL;
2078 else {
2079 /*
2080 * Userland gives us the source and destination of the connection;
2081 * reverse the lookup so we ask about the return traffic, which is
2082 * how the entry is keyed, enabling us to find it in the state
2083 * tree.
2084 */
2085 if (direction == PF_IN) {
2086 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
2087 key.ext.port = pnl->dport;
2088 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
2089 key.gwy.port = pnl->sport;
2090 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
2091 } else {
2092 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
2093 key.lan.port = pnl->dport;
2094 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
2095 key.ext.port = pnl->sport;
2096 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
2097 }
2098 if (m > 1)
2099 error = E2BIG; /* more than one state */
2100 else if (state != NULL) {
2101 if (direction == PF_IN) {
2102 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
2103 state->af);
2104 pnl->rsport = state->lan.port;
2105 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
2106 pnl->af);
2107 pnl->rdport = pnl->dport;
2108 } else {
2109 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
2110 state->af);
2111 pnl->rdport = state->gwy.port;
2112 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
2113 pnl->af);
2114 pnl->rsport = pnl->sport;
2115 }
2116 } else
2117 error = ENOENT;
2118 }
2119 break;
2120 }
2121
2122 case DIOCSETTIMEOUT: {
2123 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2124 int old;
2125
2126 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2127 pt->seconds < 0) {
2128 error = EINVAL;
2129 goto fail;
2130 }
2131 old = pf_default_rule.timeout[pt->timeout];
2132 pf_default_rule.timeout[pt->timeout] = pt->seconds;
2133 pt->seconds = old;
2134 break;
2135 }
2136
2137 case DIOCGETTIMEOUT: {
2138 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2139
2140 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2141 error = EINVAL;
2142 goto fail;
2143 }
2144 pt->seconds = pf_default_rule.timeout[pt->timeout];
2145 break;
2146 }
2147
2148 case DIOCGETLIMIT: {
2149 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2150
2151 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2152 error = EINVAL;
2153 goto fail;
2154 }
2155 pl->limit = pf_pool_limits[pl->index].limit;
2156 break;
2157 }
2158
2159 case DIOCSETLIMIT: {
2160 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2161 int old_limit;
2162
2163 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2164 pf_pool_limits[pl->index].pp == NULL) {
2165 error = EINVAL;
2166 goto fail;
2167 }
2168#ifdef __FreeBSD__
2169 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit);
2170#else
2171 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2172 pl->limit, NULL, 0) != 0) {
2173 error = EBUSY;
2174 goto fail;
2175 }
2176#endif
2177 old_limit = pf_pool_limits[pl->index].limit;
2178 pf_pool_limits[pl->index].limit = pl->limit;
2179 pl->limit = old_limit;
2180 break;
2181 }
2182
2183 case DIOCSETDEBUG: {
2184 u_int32_t *level = (u_int32_t *)addr;
2185
2186 pf_status.debug = *level;
2187 break;
2188 }
2189
2190 case DIOCCLRRULECTRS: {
2191 struct pf_ruleset *ruleset = &pf_main_ruleset;
2192 struct pf_rule *rule;
2193
2194 TAILQ_FOREACH(rule,
2195 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2196 rule->evaluations = rule->packets =
2197 rule->bytes = 0;
2198 break;
2199 }
2200
2201#ifdef __FreeBSD__
2202 case DIOCGIFSPEED: {
2203 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
2204 struct pf_ifspeed ps;
2205 struct ifnet *ifp;
2206
2207 if (psp->ifname[0] != 0) {
2208 /* Can we completely trust user-land? */
2209 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2210 ifp = ifunit(ps.ifname);
2211 if (ifp != NULL)
2212 psp->baudrate = ifp->if_baudrate;
2213 else
2214 error = EINVAL;
2215 } else
2216 error = EINVAL;
2217 break;
2218 }
2219#endif /* __FreeBSD__ */
2220
2221#ifdef ALTQ
2222 case DIOCSTARTALTQ: {
2223 struct pf_altq *altq;
2224
2225 /* enable all altq interfaces on active list */
2226 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2227 if (altq->qname[0] == 0) {
2228 error = pf_enable_altq(altq);
2229 if (error != 0)
2230 break;
2231 }
2232 }
2233 if (error == 0)
2234 pf_altq_running = 1;
2235 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2236 break;
2237 }
2238
2239 case DIOCSTOPALTQ: {
2240 struct pf_altq *altq;
2241
2242 /* disable all altq interfaces on active list */
2243 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2244 if (altq->qname[0] == 0) {
2245 error = pf_disable_altq(altq);
2246 if (error != 0)
2247 break;
2248 }
2249 }
2250 if (error == 0)
2251 pf_altq_running = 0;
2252 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2253 break;
2254 }
2255
2256 case DIOCADDALTQ: {
2257 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2258 struct pf_altq *altq, *a;
2259
2260 if (pa->ticket != ticket_altqs_inactive) {
2261 error = EBUSY;
2262 break;
2263 }
2264 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2265 if (altq == NULL) {
2266 error = ENOMEM;
2267 break;
2268 }
2269 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2270
2271 /*
2272 * If this is for a queue (qname set), look up its queue id and copy
2273 * the discipline pointer from the root altq on the same interface.
2274 */
2275 if (altq->qname[0] != 0) {
2276 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2277 error = EBUSY;
2278 pool_put(&pf_altq_pl, altq);
2279 break;
2280 }
2281 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2282 if (strncmp(a->ifname, altq->ifname,
2283 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2284 altq->altq_disc = a->altq_disc;
2285 break;
2286 }
2287 }
2288 }
2289
2290#ifdef __FreeBSD__
2291 PF_UNLOCK();
2292#endif
2293 error = altq_add(altq);
2294#ifdef __FreeBSD__
2295 PF_LOCK();
2296#endif
2297 if (error) {
2298 pool_put(&pf_altq_pl, altq);
2299 break;
2300 }
2301
2302 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2303 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2304 break;
2305 }
2306
2307 case DIOCGETALTQS: {
2308 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2309 struct pf_altq *altq;
2310
2311 pa->nr = 0;
2312 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2313 pa->nr++;
2314 pa->ticket = ticket_altqs_active;
2315 break;
2316 }
2317
2318 case DIOCGETALTQ: {
2319 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2320 struct pf_altq *altq;
2321 u_int32_t nr;
2322
2323 if (pa->ticket != ticket_altqs_active) {
2324 error = EBUSY;
2325 break;
2326 }
2327 nr = 0;
2328 altq = TAILQ_FIRST(pf_altqs_active);
2329 while ((altq != NULL) && (nr < pa->nr)) {
2330 altq = TAILQ_NEXT(altq, entries);
2331 nr++;
2332 }
2333 if (altq == NULL) {
2334 error = EBUSY;
2335 break;
2336 }
2337 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2338 break;
2339 }
2340
2341 case DIOCCHANGEALTQ:
2342 /* CHANGEALTQ not supported yet! */
2343 error = ENODEV;
2344 break;
2345
2346 case DIOCGETQSTATS: {
2347 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2348 struct pf_altq *altq;
2349 u_int32_t nr;
2350 int nbytes;
2351
2352 if (pq->ticket != ticket_altqs_active) {
2353 error = EBUSY;
2354 break;
2355 }
2356 nbytes = pq->nbytes;
2357 nr = 0;
2358 altq = TAILQ_FIRST(pf_altqs_active);
2359 while ((altq != NULL) && (nr < pq->nr)) {
2360 altq = TAILQ_NEXT(altq, entries);
2361 nr++;
2362 }
2363 if (altq == NULL) {
2364 error = EBUSY;
2365 break;
2366 }
2367#ifdef __FreeBSD__
2368 PF_UNLOCK();
2369#endif
2370 error = altq_getqstats(altq, pq->buf, &nbytes);
2371#ifdef __FreeBSD__
2372 PF_LOCK();
2373#endif
2374 if (error == 0) {
2375 pq->scheduler = altq->scheduler;
2376 pq->nbytes = nbytes;
2377 }
2378 break;
2379 }
2380#endif /* ALTQ */
2381
2382 case DIOCBEGINADDRS: {
2383 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2384
2385 pf_empty_pool(&pf_pabuf);
2386 pp->ticket = ++ticket_pabuf;
2387 break;
2388 }
2389
2390 case DIOCADDADDR: {
2391 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2392
2393#ifndef INET
2394 if (pp->af == AF_INET) {
2395 error = EAFNOSUPPORT;
2396 break;
2397 }
2398#endif /* INET */
2399#ifndef INET6
2400 if (pp->af == AF_INET6) {
2401 error = EAFNOSUPPORT;
2402 break;
2403 }
2404#endif /* INET6 */
2405 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2406 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2407 pp->addr.addr.type != PF_ADDR_TABLE) {
2408 error = EINVAL;
2409 break;
2410 }
2411 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2412 if (pa == NULL) {
2413 error = ENOMEM;
2414 break;
2415 }
2416 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2417 if (pa->ifname[0]) {
2418 pa->kif = pfi_attach_rule(pa->ifname);
2419 if (pa->kif == NULL) {
2420 pool_put(&pf_pooladdr_pl, pa);
2421 error = EINVAL;
2422 break;
2423 }
2424 }
2425 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2426 pfi_dynaddr_remove(&pa->addr);
2427 pfi_detach_rule(pa->kif);
2428 pool_put(&pf_pooladdr_pl, pa);
2429 error = EINVAL;
2430 break;
2431 }
2432 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2433 break;
2434 }
2435
2436 case DIOCGETADDRS: {
2437 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2438
2439 pp->nr = 0;
2440 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2441 pp->r_num, 0, 1, 0);
2442 if (pool == NULL) {
2443 error = EBUSY;
2444 break;
2445 }
2446 TAILQ_FOREACH(pa, &pool->list, entries)
2447 pp->nr++;
2448 break;
2449 }
2450
2451 case DIOCGETADDR: {
2452 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2453 u_int32_t nr = 0;
2454
2455 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2456 pp->r_num, 0, 1, 1);
2457 if (pool == NULL) {
2458 error = EBUSY;
2459 break;
2460 }
2461 pa = TAILQ_FIRST(&pool->list);
2462 while ((pa != NULL) && (nr < pp->nr)) {
2463 pa = TAILQ_NEXT(pa, entries);
2464 nr++;
2465 }
2466 if (pa == NULL) {
2467 error = EBUSY;
2468 break;
2469 }
2470 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2471 pfi_dynaddr_copyout(&pp->addr.addr);
2472 pf_tbladdr_copyout(&pp->addr.addr);
2473 pf_rtlabel_copyout(&pp->addr.addr);
2474 break;
2475 }
2476
2477 case DIOCCHANGEADDR: {
2478 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2479 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2480 struct pf_ruleset *ruleset;
2481
2482 if (pca->action < PF_CHANGE_ADD_HEAD ||
2483 pca->action > PF_CHANGE_REMOVE) {
2484 error = EINVAL;
2485 break;
2486 }
2487 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2488 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2489 pca->addr.addr.type != PF_ADDR_TABLE) {
2490 error = EINVAL;
2491 break;
2492 }
2493
2494 ruleset = pf_find_ruleset(pca->anchor);
2495 if (ruleset == NULL) {
2496 error = EBUSY;
2497 break;
2498 }
2499 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2500 pca->r_num, pca->r_last, 1, 1);
2501 if (pool == NULL) {
2502 error = EBUSY;
2503 break;
2504 }
2505 if (pca->action != PF_CHANGE_REMOVE) {
2506 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2507 if (newpa == NULL) {
2508 error = ENOMEM;
2509 break;
2510 }
2511 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2512#ifndef INET
2513 if (pca->af == AF_INET) {
2514 pool_put(&pf_pooladdr_pl, newpa);
2515 error = EAFNOSUPPORT;
2516 break;
2517 }
2518#endif /* INET */
2519#ifndef INET6
2520 if (pca->af == AF_INET6) {
2521 pool_put(&pf_pooladdr_pl, newpa);
2522 error = EAFNOSUPPORT;
2523 break;
2524 }
2525#endif /* INET6 */
2526 if (newpa->ifname[0]) {
2527 newpa->kif = pfi_attach_rule(newpa->ifname);
2528 if (newpa->kif == NULL) {
2529 pool_put(&pf_pooladdr_pl, newpa);
2530 error = EINVAL;
2531 break;
2532 }
2533 } else
2534 newpa->kif = NULL;
2535 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2536 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2537 pfi_dynaddr_remove(&newpa->addr);
2538 pfi_detach_rule(newpa->kif);
2539 pool_put(&pf_pooladdr_pl, newpa);
2540 error = EINVAL;
2541 break;
2542 }
2543 }
2544
2545 if (pca->action == PF_CHANGE_ADD_HEAD)
2546 oldpa = TAILQ_FIRST(&pool->list);
2547 else if (pca->action == PF_CHANGE_ADD_TAIL)
2548 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2549 else {
2550 int i = 0;
2551
2552 oldpa = TAILQ_FIRST(&pool->list);
2553 while ((oldpa != NULL) && (i < pca->nr)) {
2554 oldpa = TAILQ_NEXT(oldpa, entries);
2555 i++;
2556 }
2557 if (oldpa == NULL) {
2558 error = EINVAL;
2559 break;
2560 }
2561 }
2562
2563 if (pca->action == PF_CHANGE_REMOVE) {
2564 TAILQ_REMOVE(&pool->list, oldpa, entries);
2565 pfi_dynaddr_remove(&oldpa->addr);
2566 pf_tbladdr_remove(&oldpa->addr);
2567 pfi_detach_rule(oldpa->kif);
2568 pool_put(&pf_pooladdr_pl, oldpa);
2569 } else {
2570 if (oldpa == NULL)
2571 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2572 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2573 pca->action == PF_CHANGE_ADD_BEFORE)
2574 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2575 else
2576 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2577 newpa, entries);
2578 }
2579
2580 pool->cur = TAILQ_FIRST(&pool->list);
2581 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2582 pca->af);
2583 break;
2584 }
2585
2586 case DIOCGETRULESETS: {
2587 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2588 struct pf_ruleset *ruleset;
2589 struct pf_anchor *anchor;
2590
2591 pr->path[sizeof(pr->path) - 1] = 0;
2592 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2593 error = EINVAL;
2594 break;
2595 }
2596 pr->nr = 0;
2597 if (ruleset->anchor == NULL) {
2598 /* XXX kludge for pf_main_ruleset */
2599 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2600 if (anchor->parent == NULL)
2601 pr->nr++;
2602 } else {
2603 RB_FOREACH(anchor, pf_anchor_node,
2604 &ruleset->anchor->children)
2605 pr->nr++;
2606 }
2607 break;
2608 }
2609
2610 case DIOCGETRULESET: {
2611 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2612 struct pf_ruleset *ruleset;
2613 struct pf_anchor *anchor;
2614 u_int32_t nr = 0;
2615
2616 pr->path[sizeof(pr->path) - 1] = 0;
2617 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2618 error = EINVAL;
2619 break;
2620 }
2621 pr->name[0] = 0;
2622 if (ruleset->anchor == NULL) {
2623 /* XXX kludge for pf_main_ruleset */
2624 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2625 if (anchor->parent == NULL && nr++ == pr->nr) {
2626 strlcpy(pr->name, anchor->name,
2627 sizeof(pr->name));
2628 break;
2629 }
2630 } else {
2631 RB_FOREACH(anchor, pf_anchor_node,
2632 &ruleset->anchor->children)
2633 if (nr++ == pr->nr) {
2634 strlcpy(pr->name, anchor->name,
2635 sizeof(pr->name));
2636 break;
2637 }
2638 }
2639 if (!pr->name[0])
2640 error = EBUSY;
2641 break;
2642 }
2643
2644 case DIOCRCLRTABLES: {
2645 struct pfioc_table *io = (struct pfioc_table *)addr;
2646
2647 if (io->pfrio_esize != 0) {
2648 error = ENODEV;
2649 break;
2650 }
2651 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2652 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2653 break;
2654 }
2655
2656 case DIOCRADDTABLES: {
2657 struct pfioc_table *io = (struct pfioc_table *)addr;
2658
2659 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2660 error = ENODEV;
2661 break;
2662 }
2663 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2664 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2665 break;
2666 }
2667
2668 case DIOCRDELTABLES: {
2669 struct pfioc_table *io = (struct pfioc_table *)addr;
2670
2671 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2672 error = ENODEV;
2673 break;
2674 }
2675 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2676 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2677 break;
2678 }
2679
2680 case DIOCRGETTABLES: {
2681 struct pfioc_table *io = (struct pfioc_table *)addr;
2682
2683 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2684 error = ENODEV;
2685 break;
2686 }
2687 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2688 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2689 break;
2690 }
2691
2692 case DIOCRGETTSTATS: {
2693 struct pfioc_table *io = (struct pfioc_table *)addr;
2694
2695 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2696 error = ENODEV;
2697 break;
2698 }
2699 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2700 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2701 break;
2702 }
2703
2704 case DIOCRCLRTSTATS: {
2705 struct pfioc_table *io = (struct pfioc_table *)addr;
2706
2707 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2708 error = ENODEV;
2709 break;
2710 }
2711 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2712 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2713 break;
2714 }
2715
2716 case DIOCRSETTFLAGS: {
2717 struct pfioc_table *io = (struct pfioc_table *)addr;
2718
2719 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2720 error = ENODEV;
2721 break;
2722 }
2723 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2724 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2725 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2726 break;
2727 }
2728
2729 case DIOCRCLRADDRS: {
2730 struct pfioc_table *io = (struct pfioc_table *)addr;
2731
2732 if (io->pfrio_esize != 0) {
2733 error = ENODEV;
2734 break;
2735 }
2736 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2737 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2738 break;
2739 }
2740
2741 case DIOCRADDADDRS: {
2742 struct pfioc_table *io = (struct pfioc_table *)addr;
2743
2744 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2745 error = ENODEV;
2746 break;
2747 }
2748 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2749 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2750 PFR_FLAG_USERIOCTL);
2751 break;
2752 }
2753
2754 case DIOCRDELADDRS: {
2755 struct pfioc_table *io = (struct pfioc_table *)addr;
2756
2757 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2758 error = ENODEV;
2759 break;
2760 }
2761 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2762 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2763 PFR_FLAG_USERIOCTL);
2764 break;
2765 }
2766
2767 case DIOCRSETADDRS: {
2768 struct pfioc_table *io = (struct pfioc_table *)addr;
2769
2770 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2771 error = ENODEV;
2772 break;
2773 }
2774 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2775 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2776 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2777 PFR_FLAG_USERIOCTL);
2778 break;
2779 }
2780
2781 case DIOCRGETADDRS: {
2782 struct pfioc_table *io = (struct pfioc_table *)addr;
2783
2784 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2785 error = ENODEV;
2786 break;
2787 }
2788 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2789 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2790 break;
2791 }
2792
2793 case DIOCRGETASTATS: {
2794 struct pfioc_table *io = (struct pfioc_table *)addr;
2795
2796 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2797 error = ENODEV;
2798 break;
2799 }
2800 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2801 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2802 break;
2803 }
2804
2805 case DIOCRCLRASTATS: {
2806 struct pfioc_table *io = (struct pfioc_table *)addr;
2807
2808 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2809 error = ENODEV;
2810 break;
2811 }
2812 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2813 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2814 PFR_FLAG_USERIOCTL);
2815 break;
2816 }
2817
2818 case DIOCRTSTADDRS: {
2819 struct pfioc_table *io = (struct pfioc_table *)addr;
2820
2821 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2822 error = ENODEV;
2823 break;
2824 }
2825 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2826 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2827 PFR_FLAG_USERIOCTL);
2828 break;
2829 }
2830
2831 case DIOCRINADEFINE: {
2832 struct pfioc_table *io = (struct pfioc_table *)addr;
2833
2834 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2835 error = ENODEV;
2836 break;
2837 }
2838 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2839 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2840 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2841 break;
2842 }
2843
2844 case DIOCOSFPADD: {
2845 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2846 error = pf_osfp_add(io);
2847 break;
2848 }
2849
2850 case DIOCOSFPGET: {
2851 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2852 error = pf_osfp_get(io);
2853 break;
2854 }
2855
2856 case DIOCXBEGIN: {
2857 struct pfioc_trans *io = (struct pfioc_trans *)
2858 addr;
2859 static struct pfioc_trans_e ioe;
2860 static struct pfr_table table;
2861 int i;
2862
2863 if (io->esize != sizeof(ioe)) {
2864 error = ENODEV;
2865 goto fail;
2866 }
2867 for (i = 0; i < io->size; i++) {
2868#ifdef __FreeBSD__
2869 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2870 if (error) {
2871#else
2872 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2873#endif
2874 error = EFAULT;
2875 goto fail;
2876 }
2877 switch (ioe.rs_num) {
2878#ifdef ALTQ
2879 case PF_RULESET_ALTQ:
2880 if (ioe.anchor[0]) {
2881 error = EINVAL;
2882 goto fail;
2883 }
2884 if ((error = pf_begin_altq(&ioe.ticket)))
2885 goto fail;
2886 break;
2887#endif /* ALTQ */
2888 case PF_RULESET_TABLE:
2889 bzero(&table, sizeof(table));
2890 strlcpy(table.pfrt_anchor, ioe.anchor,
2891 sizeof(table.pfrt_anchor));
2892 if ((error = pfr_ina_begin(&table,
2893 &ioe.ticket, NULL, 0)))
2894 goto fail;
2895 break;
2896 default:
2897 if ((error = pf_begin_rules(&ioe.ticket,
2898 ioe.rs_num, ioe.anchor)))
2899 goto fail;
2900 break;
2901 }
2902#ifdef __FreeBSD__
2903 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]),
2904 error);
2905 if (error) {
2906#else
2907 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2908#endif
2909 error = EFAULT;
2910 goto fail;
2911 }
2912 }
2913 break;
2914 }
2915
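	/*
	 * Abort an open transaction: every ticket handed out by DIOCXBEGIN
	 * is passed back and the corresponding inactive ruleset, table or
	 * ALTQ state is discarded.
	 */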
2916 case DIOCXROLLBACK: {
2917 struct pfioc_trans *io = (struct pfioc_trans *)
2918 addr;
2919 static struct pfioc_trans_e ioe;
2920 static struct pfr_table table;
2921 int i;
2922
2923 if (io->esize != sizeof(ioe)) {
2924 error = ENODEV;
2925 goto fail;
2926 }
2927 for (i = 0; i < io->size; i++) {
2928#ifdef __FreeBSD__
2929 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2930 if (error) {
2931#else
2932 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2933#endif
2934 error = EFAULT;
2935 goto fail;
2936 }
2937 switch (ioe.rs_num) {
2938#ifdef ALTQ
2939 case PF_RULESET_ALTQ:
2940 if (ioe.anchor[0]) {
2941 error = EINVAL;
2942 goto fail;
2943 }
2944 if ((error = pf_rollback_altq(ioe.ticket)))
2945 goto fail; /* really bad */
2946 break;
2947#endif /* ALTQ */
2948 case PF_RULESET_TABLE:
2949 bzero(&table, sizeof(table));
2950 strlcpy(table.pfrt_anchor, ioe.anchor,
2951 sizeof(table.pfrt_anchor));
2952 if ((error = pfr_ina_rollback(&table,
2953 ioe.ticket, NULL, 0)))
2954 goto fail; /* really bad */
2955 break;
2956 default:
2957 if ((error = pf_rollback_rules(ioe.ticket,
2958 ioe.rs_num, ioe.anchor)))
2959 goto fail; /* really bad */
2960 break;
2961 }
2962 }
2963 break;
2964 }
2965
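	/*
	 * Commit in two passes: first verify that every ticket still refers
	 * to an open inactive ruleset, so the whole transaction can be
	 * refused with EBUSY before anything is touched, then swap the
	 * inactive rulesets, tables and ALTQ configuration in.
	 */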
2966 case DIOCXCOMMIT: {
2967 struct pfioc_trans *io = (struct pfioc_trans *)
2968 addr;
2969 static struct pfioc_trans_e ioe;
2970 static struct pfr_table table;
2971 struct pf_ruleset *rs;
2972 int i;
2973
2974 if (io->esize != sizeof(ioe)) {
2975 error = ENODEV;
2976 goto fail;
2977 }
2978		/* first make sure everything will succeed */
2979 for (i = 0; i < io->size; i++) {
2980#ifdef __FreeBSD__
2981 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2982 if (error) {
2983#else
2984 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2985#endif
2986 error = EFAULT;
2987 goto fail;
2988 }
2989 switch (ioe.rs_num) {
2990#ifdef ALTQ
2991 case PF_RULESET_ALTQ:
2992 if (ioe.anchor[0]) {
2993 error = EINVAL;
2994 goto fail;
2995 }
2996 if (!altqs_inactive_open || ioe.ticket !=
2997 ticket_altqs_inactive) {
2998 error = EBUSY;
2999 goto fail;
3000 }
3001 break;
3002#endif /* ALTQ */
3003 case PF_RULESET_TABLE:
3004 rs = pf_find_ruleset(ioe.anchor);
3005 if (rs == NULL || !rs->topen || ioe.ticket !=
3006 rs->tticket) {
3007 error = EBUSY;
3008 goto fail;
3009 }
3010 break;
3011 default:
3012 if (ioe.rs_num < 0 || ioe.rs_num >=
3013 PF_RULESET_MAX) {
3014 error = EINVAL;
3015 goto fail;
3016 }
3017 rs = pf_find_ruleset(ioe.anchor);
3018 if (rs == NULL ||
3019 !rs->rules[ioe.rs_num].inactive.open ||
3020 rs->rules[ioe.rs_num].inactive.ticket !=
3021 ioe.ticket) {
3022 error = EBUSY;
3023 goto fail;
3024 }
3025 break;
3026 }
3027 }
3028 /* now do the commit - no errors should happen here */
3029 for (i = 0; i < io->size; i++) {
3030#ifdef __FreeBSD__
3031 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
3032 if (error) {
3033#else
3034 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
3035#endif
3036 error = EFAULT;
3037 goto fail;
3038 }
3039 switch (ioe.rs_num) {
3040#ifdef ALTQ
3041 case PF_RULESET_ALTQ:
3042 if ((error = pf_commit_altq(ioe.ticket)))
3043 goto fail; /* really bad */
3044 break;
3045#endif /* ALTQ */
3046 case PF_RULESET_TABLE:
3047 bzero(&table, sizeof(table));
3048 strlcpy(table.pfrt_anchor, ioe.anchor,
3049 sizeof(table.pfrt_anchor));
3050 if ((error = pfr_ina_commit(&table, ioe.ticket,
3051 NULL, NULL, 0)))
3052 goto fail; /* really bad */
3053 break;
3054 default:
3055 if ((error = pf_commit_rules(ioe.ticket,
3056 ioe.rs_num, ioe.anchor)))
3057 goto fail; /* really bad */
3058 break;
3059 }
3060 }
3061 break;
3062 }
3063
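	/*
	 * Export the source tracking nodes.  With psn_len == 0 only the
	 * required buffer size is reported; otherwise each node is copied
	 * out with creation/expire rewritten as seconds relative to now and
	 * the connection rate counter scaled down to the remaining part of
	 * the rate interval.
	 */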
3064 case DIOCGETSRCNODES: {
3065 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3066 struct pf_src_node *n;
3067 struct pf_src_node *p, pstore;
3068 u_int32_t nr = 0;
3069 int space = psn->psn_len;
3070
3071 if (space == 0) {
3072 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
3073 nr++;
3074 psn->psn_len = sizeof(struct pf_src_node) * nr;
3075 break;
3076 }
3077
3078 p = psn->psn_src_nodes;
3079 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3080 int secs = time_second, diff;
3081
3082 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3083 break;
3084
3085 bcopy(n, &pstore, sizeof(pstore));
3086 if (n->rule.ptr != NULL)
3087 pstore.rule.nr = n->rule.ptr->nr;
3088 pstore.creation = secs - pstore.creation;
3089 if (pstore.expire > secs)
3090 pstore.expire -= secs;
3091 else
3092 pstore.expire = 0;
3093
3094 /* adjust the connection rate estimate */
3095 diff = secs - n->conn_rate.last;
3096 if (diff >= n->conn_rate.seconds)
3097 pstore.conn_rate.count = 0;
3098 else
3099 pstore.conn_rate.count -=
3100 n->conn_rate.count * diff /
3101 n->conn_rate.seconds;
3102
3103#ifdef __FreeBSD__
3104 PF_COPYOUT(&pstore, p, sizeof(*p), error);
3105#else
3106 error = copyout(&pstore, p, sizeof(*p));
3107#endif
3108 if (error)
3109 goto fail;
3110 p++;
3111 nr++;
3112 }
3113 psn->psn_len = sizeof(struct pf_src_node) * nr;
3114 break;
3115 }
3116
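	/*
	 * Drop all source nodes: detach them from existing states, force
	 * their expiry and let the purge run reclaim them.
	 */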
3117 case DIOCCLRSRCNODES: {
3118 struct pf_src_node *n;
3119 struct pf_state *state;
3120
3121 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3122 state->src_node = NULL;
3123 state->nat_src_node = NULL;
3124 }
3125 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3126 n->expire = 1;
3127 n->states = 0;
3128 }
3129 pf_purge_expired_src_nodes();
3130 pf_status.src_nodes = 0;
3131 break;
3132 }
3133
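	/*
	 * Set the host id (used e.g. by pfsync to identify this host); a
	 * value of zero asks the kernel to pick a random one.
	 */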
3134 case DIOCSETHOSTID: {
3135 u_int32_t *hostid = (u_int32_t *)addr;
3136
3137 if (*hostid == 0)
3138 pf_status.hostid = arc4random();
3139 else
3140 pf_status.hostid = *hostid;
3141 break;
3142 }
3143
3144 case DIOCOSFPFLUSH:
3145 pf_osfp_flush();
3146 break;
3147
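	/*
	 * Interface ioctls: DIOCIGETIFACES exports the pfi interface list,
	 * DIOCICLRISTATS zeroes its counters and DIOCSETIFFLAG/DIOCCLRIFFLAG
	 * manipulate the per-interface flags.
	 */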
3148 case DIOCIGETIFACES: {
3149 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3150
3151 if (io->pfiio_esize != sizeof(struct pfi_if)) {
3152 error = ENODEV;
3153 break;
3154 }
3155 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
3156 &io->pfiio_size, io->pfiio_flags);
3157 break;
3158 }
3159
3160 case DIOCICLRISTATS: {
3161 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3162
3163 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
3164 io->pfiio_flags);
3165 break;
3166 }
3167
3168 case DIOCSETIFFLAG: {
3169 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3170
3171 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3172 break;
3173 }
3174
3175 case DIOCCLRIFFLAG: {
3176 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3177
3178 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3179 break;
3180 }
3181
3182 default:
3183 error = ENODEV;
3184 break;
3185 }
3186fail:
3187#ifdef __FreeBSD__
3188 PF_UNLOCK();
3189#else
3190 splx(s);
3191#endif
3192 return (error);
3193}
3194
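/*
 * FreeBSD-only helpers below: pf_clear_states(), pf_clear_tables() and
 * pf_clear_srcnodes() perform the same flushing that userland normally
 * requests through the ioctl interface and are called from shutdown_pf()
 * when the module is unloaded.
 */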
3195#ifdef __FreeBSD__
3196/*
3197 * XXX - Check for version mismatch!!!
3198 */
3199static void
3200pf_clear_states(void)
3201{
3202 struct pf_state *state;
3203
3204 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3205 state->timeout = PFTM_PURGE;
3206#if NPFSYNC
3207 /* don't send out individual delete messages */
3208 state->sync_flags = PFSTATE_NOSYNC;
3209#endif
3210 }
3211 pf_purge_expired_states();
3212 pf_status.states = 0;
3213#if 0 /* NPFSYNC */
3214/*
3215 * XXX This is called on module unload; we do not want to sync that over?
3216 */
3217 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3218#endif
3219}
3220
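/*
 * Flush the address tables with pfr_clr_tables(), using a zeroed filter
 * and no flags, much as the table ioctls above do on behalf of pfctl.
 */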
3221static int
3222pf_clear_tables(void)
3223{
3224 struct pfioc_table io;
3225 int error;
3226
3227 bzero(&io, sizeof(io));
3228
3229 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3230 io.pfrio_flags);
3231
3232 return (error);
3233}
3234
3235static void
3236pf_clear_srcnodes(void)
3237{
3238 struct pf_src_node *n;
3239 struct pf_state *state;
3240
3241 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3242 state->src_node = NULL;
3243 state->nat_src_node = NULL;
3244 }
3245 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3246 n->expire = 1;
3247 n->states = 0;
3248 }
3249 pf_purge_expired_src_nodes();
3250 pf_status.src_nodes = 0;
3251}
3252/*
3253 * XXX - Check for version mismatch!!!
3254 */
3255
3256/*
3257 * Duplicate pfctl -Fa operation to get rid of as much as we can.
3258 */
3259static int
3260shutdown_pf(void)
3261{
3262 int error = 0;
3263 u_int32_t t[5];
3264 char nn = '\0';
3265
3266 callout_stop(&pf_expire_to);
3267
3268 pf_status.running = 0;
3269 do {
3270 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3271 != 0) {
3272 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3273 break;
3274 }
3275 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3276 != 0) {
3277 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3278 break; /* XXX: rollback? */
3279 }
3280 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3281 != 0) {
3282 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3283 break; /* XXX: rollback? */
3284 }
3285 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3286 != 0) {
3287 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3288 break; /* XXX: rollback? */
3289 }
3290 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3291 != 0) {
3292 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3293 break; /* XXX: rollback? */
3294 }
3295
3296 /* XXX: these should always succeed here */
3297 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3298 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3299 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3300 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3301 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3302
3303 if ((error = pf_clear_tables()) != 0)
3304 break;
3305
3306#ifdef ALTQ
3307 if ((error = pf_begin_altq(&t[0])) != 0) {
3308 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3309 break;
3310 }
3311 pf_commit_altq(t[0]);
3312#endif
3313
3314 pf_clear_states();
3315
3316 pf_clear_srcnodes();
3317
3318		/* status does not use malloc'ed memory, so no cleanup is needed */
3319		/* fingerprints and interfaces have their own cleanup code */
3320 } while(0);
3321
3322 return (error);
3323}
3324
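/*
 * pfil(9) hook shims.  The packet filter framework calls these for every
 * inbound/outbound IPv4 (and, with INET6, IPv6) packet; they fix up byte
 * order and delayed checksums where needed and hand the mbuf to
 * pf_test()/pf_test6().  A non-zero return with *m set to NULL means the
 * packet was consumed and dropped.
 */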
3325static int
3326pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3327 struct inpcb *inp)
3328{
3329 /*
3330 * XXX Wed Jul 9 22:03:16 2003 UTC
3331 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
3332	 * in the network stack.  OpenBSD's stack used to convert
3333	 * ip_len/ip_off to host byte order first, just as FreeBSD does.
3334	 * That is no longer the case, so we convert to network byte order
3335	 * before pf_test() and back to host byte order afterwards.
3336 */
3337 struct ip *h = NULL;
3338 int chk;
3339
3340 if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
3341		/* if m_pkthdr.len is less than the IP header, pf will handle it */
3342 h = mtod(*m, struct ip *);
3343 HTONS(h->ip_len);
3344 HTONS(h->ip_off);
3345 }
3346 chk = pf_test(PF_IN, ifp, m, NULL, inp);
3347 if (chk && *m) {
3348 m_freem(*m);
3349 *m = NULL;
3350 }
3351 if (*m != NULL) {
3352 /* pf_test can change ip header location */
3353 h = mtod(*m, struct ip *);
3354 NTOHS(h->ip_len);
3355 NTOHS(h->ip_off);
3356 }
3357 return chk;
3358}
3359
3360static int
3361pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3362 struct inpcb *inp)
3363{
3364 /*
3365 * XXX Wed Jul 9 22:03:16 2003 UTC
3366 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
3367	 * in the network stack.  OpenBSD's stack used to convert
3368	 * ip_len/ip_off to host byte order first, just as FreeBSD does.
3369	 * That is no longer the case, so we convert to network byte order
3370	 * before pf_test() and back to host byte order afterwards.
3371 */
3372 struct ip *h = NULL;
3373 int chk;
3374
3375	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3376 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3377 in_delayed_cksum(*m);
3378 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3379 }
3380 if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
3381		/* if m_pkthdr.len is less than the IP header, pf will handle it */
3382 h = mtod(*m, struct ip *);
3383 HTONS(h->ip_len);
3384 HTONS(h->ip_off);
3385 }
3386 chk = pf_test(PF_OUT, ifp, m, NULL, inp);
3387 if (chk && *m) {
3388 m_freem(*m);
3389 *m = NULL;
3390 }
3391 if (*m != NULL) {
3392 /* pf_test can change ip header location */
3393 h = mtod(*m, struct ip *);
3394 NTOHS(h->ip_len);
3395 NTOHS(h->ip_off);
3396 }
3397 return chk;
3398}
3399
3400#ifdef INET6
3401static int
3402pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3403 struct inpcb *inp)
3404{
3405 /*
3406	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
3407 */
3408 int chk;
3409
3410 chk = pf_test6(PF_IN, ifp, m, NULL, inp);
3411 if (chk && *m) {
3412 m_freem(*m);
3413 *m = NULL;
3414 }
3415 return chk;
3416}
3417
3418static int
3419pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3420 struct inpcb *inp)
3421{
3422 /*
3423	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
3424 */
3425 int chk;
3426
3427	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3428 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3429 in_delayed_cksum(*m);
3430 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3431 }
3432 chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
3433 if (chk && *m) {
3434 m_freem(*m);
3435 *m = NULL;
3436 }
3437 return chk;
3438}
3439#endif /* INET6 */
3440
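/*
 * hook_pf()/dehook_pf() attach and detach the shims above to the AF_INET
 * (and AF_INET6) pfil heads; pf_pfil_hooked records the registration so
 * both operations are idempotent.
 */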
3441static int
3442hook_pf(void)
3443{
3444 struct pfil_head *pfh_inet;
3445#ifdef INET6
3446 struct pfil_head *pfh_inet6;
3447#endif
3448
3449 PF_ASSERT(MA_NOTOWNED);
3450
3451 if (pf_pfil_hooked)
3452 return (0);
3453
3454 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3455 if (pfh_inet == NULL)
3456 return (ESRCH); /* XXX */
3457 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3458 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3459#ifdef INET6
3460 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3461 if (pfh_inet6 == NULL) {
3462 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3463 pfh_inet);
3464 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3465 pfh_inet);
3466 return (ESRCH); /* XXX */
3467 }
3468 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3469 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3470#endif
3471
3472 pf_pfil_hooked = 1;
3473 return (0);
3474}
3475
3476static int
3477dehook_pf(void)
3478{
3479 struct pfil_head *pfh_inet;
3480#ifdef INET6
3481 struct pfil_head *pfh_inet6;
3482#endif
3483
3484 PF_ASSERT(MA_NOTOWNED);
3485
3486 if (pf_pfil_hooked == 0)
3487 return (0);
3488
3489 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3490 if (pfh_inet == NULL)
3491 return (ESRCH); /* XXX */
3492 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3493 pfh_inet);
3494 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3495 pfh_inet);
3496#ifdef INET6
3497 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3498 if (pfh_inet6 == NULL)
3499 return (ESRCH); /* XXX */
3500 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3501 pfh_inet6);
3502 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3503 pfh_inet6);
3504#endif
3505
3506 pf_pfil_hooked = 0;
3507 return (0);
3508}
3509
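/*
 * Module plumbing: pf_load() initializes the zone variables and the pf
 * mutex, creates the pf device node and calls pfattach(); pf_unload()
 * stops pf, detaches the pfil hooks, flushes everything via shutdown_pf()
 * and releases the remaining resources.
 */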
3510static int
3511pf_load(void)
3512{
3513 init_zone_var();
3514 init_pf_mutex();
3515 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3516 if (pfattach() < 0) {
3517 destroy_dev(pf_dev);
3518 destroy_pf_mutex();
3519 return (ENOMEM);
3520 }
3521 return (0);
3522}
3523
3524static int
3525pf_unload(void)
3526{
3527 int error = 0;
3528
3529 PF_LOCK();
3530 pf_status.running = 0;
3531 PF_UNLOCK();
3532 error = dehook_pf();
3533 if (error) {
3534 /*
3535 * Should not happen!
3536 * XXX Due to error code ESRCH, kldunload will show
3537 * a message like 'No such process'.
3538 */
3539		printf("%s: pfil unregistration failed\n", __FUNCTION__);
3540 return error;
3541 }
3542 PF_LOCK();
3543 shutdown_pf();
3544 pfi_cleanup();
3545 pf_osfp_flush();
3546 pf_osfp_cleanup();
3547 cleanup_pf_zone();
3548 PF_UNLOCK();
3549 destroy_dev(pf_dev);
3550 destroy_pf_mutex();
3551 return error;
3552}
3553
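/*
 * Module event handler: dispatch MOD_LOAD/MOD_UNLOAD to
 * pf_load()/pf_unload() and reject anything else with EINVAL.
 */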
3554static int
3555pf_modevent(module_t mod, int type, void *data)
3556{
3557 int error = 0;
3558
3559 switch(type) {
3560 case MOD_LOAD:
3561 error = pf_load();
3562 break;
3563
3564 case MOD_UNLOAD:
3565 error = pf_unload();
3566 break;
3567 default:
3568 error = EINVAL;
3569 break;
3570 }
3571 return error;
3572}
3573
3574static moduledata_t pf_mod = {
3575 "pf",
3576 pf_modevent,
3577 0
3578};
3579
3580DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
3581MODULE_VERSION(pf, PF_MODVER);
3582#endif /* __FreeBSD__ */