1/* $FreeBSD: head/sys/contrib/pf/net/pf_ioctl.c 145836 2005-05-03 16:43:32Z mlaier $ */
2/* $OpenBSD: pf_ioctl.c,v 1.139 2005/03/03 07:13:39 dhartmei Exp $ */
3
4/*
5 * Copyright (c) 2001 Daniel Hartmeier
6 * Copyright (c) 2002,2003 Henning Brauer
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 */
38
39#ifdef __FreeBSD__
40#include "opt_inet.h"
41#include "opt_inet6.h"
42#endif
43
44#ifdef __FreeBSD__
45#include "opt_bpf.h"
46#include "opt_pf.h"
47#define NBPFILTER DEV_BPF
48#define NPFLOG DEV_PFLOG
49#define NPFSYNC DEV_PFSYNC
50#else
51#include "bpfilter.h"
52#include "pflog.h"
53#include "pfsync.h"
54#endif
55
56#include <sys/param.h>
57#include <sys/systm.h>
58#include <sys/mbuf.h>
59#include <sys/filio.h>
60#include <sys/fcntl.h>
61#include <sys/socket.h>
62#include <sys/socketvar.h>
63#include <sys/kernel.h>
64#include <sys/time.h>
65#include <sys/malloc.h>
66#ifdef __FreeBSD__
67#include <sys/module.h>
68#include <sys/conf.h>
69#include <sys/proc.h>
70#else
71#include <sys/timeout.h>
72#include <sys/pool.h>
73#endif
74
75#include <net/if.h>
76#include <net/if_types.h>
77#include <net/route.h>
78
79#include <netinet/in.h>
80#include <netinet/in_var.h>
81#include <netinet/in_systm.h>
82#include <netinet/ip.h>
83#include <netinet/ip_var.h>
84#include <netinet/ip_icmp.h>
85
86#ifndef __FreeBSD__
87#include <dev/rndvar.h>
88#endif
89#include <net/pfvar.h>
90
91#if NPFSYNC > 0
92#include <net/if_pfsync.h>
93#endif /* NPFSYNC > 0 */
94
95#ifdef INET6
96#include <netinet/ip6.h>
97#include <netinet/in_pcb.h>
98#endif /* INET6 */
99
100#ifdef ALTQ
101#include <altq/altq.h>
102#endif
103
104#ifdef __FreeBSD__
105#include <sys/limits.h>
106#include <sys/lock.h>
107#include <sys/mutex.h>
108#include <net/pfil.h>
109#endif /* __FreeBSD__ */
110
111#ifdef __FreeBSD__
112void init_zone_var(void);
113void cleanup_pf_zone(void);
114int pfattach(void);
115#else
116void pfattach(int);
117int pfopen(dev_t, int, int, struct proc *);
118int pfclose(dev_t, int, int, struct proc *);
119#endif
120struct pf_pool *pf_get_pool(char *, u_int32_t, u_int8_t, u_int32_t,
121 u_int8_t, u_int8_t, u_int8_t);
122int pf_get_ruleset_number(u_int8_t);
123void pf_init_ruleset(struct pf_ruleset *);
124int pf_anchor_setup(struct pf_rule *,
125 const struct pf_ruleset *, const char *);
126int pf_anchor_copyout(const struct pf_ruleset *,
127 const struct pf_rule *, struct pfioc_rule *);
128void pf_anchor_remove(struct pf_rule *);
129
130void pf_mv_pool(struct pf_palist *, struct pf_palist *);
131void pf_empty_pool(struct pf_palist *);
132#ifdef __FreeBSD__
133int pfioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
134#else
135int pfioctl(struct cdev *, u_long, caddr_t, int, struct proc *);
136#endif
137#ifdef ALTQ
138int pf_begin_altq(u_int32_t *);
139int pf_rollback_altq(u_int32_t);
140int pf_commit_altq(u_int32_t);
141int pf_enable_altq(struct pf_altq *);
142int pf_disable_altq(struct pf_altq *);
143#endif /* ALTQ */
144int pf_begin_rules(u_int32_t *, int, const char *);
145int pf_rollback_rules(u_int32_t, int, char *);
146int pf_commit_rules(u_int32_t, int, char *);
147
148#ifdef __FreeBSD__
149extern struct callout pf_expire_to;
150#else
151extern struct timeout pf_expire_to;
152#endif
153
154struct pf_rule pf_default_rule;
155#ifdef ALTQ
156static int pf_altq_running;
157#endif
158
159#define TAGID_MAX 50000
160TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags),
161 pf_qids = TAILQ_HEAD_INITIALIZER(pf_qids);
162
163#if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
164#error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
165#endif
166static u_int16_t tagname2tag(struct pf_tags *, char *);
167static void tag2tagname(struct pf_tags *, u_int16_t, char *);
168static void tag_unref(struct pf_tags *, u_int16_t);
169int pf_rtlabel_add(struct pf_addr_wrap *);
170void pf_rtlabel_remove(struct pf_addr_wrap *);
171void pf_rtlabel_copyout(struct pf_addr_wrap *);
172
173#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x
174
175
176#ifdef __FreeBSD__
177static struct cdev *pf_dev;
178
179/*
180 * XXX - These are new and need to be checked when moving to a new version
181 */
182static void pf_clear_states(void);
183static int pf_clear_tables(void);
184static void pf_clear_srcnodes(void);
185/*
186 * XXX - These are new and need to be checked when moving to a new version
187 */
188
189/*
190 * Wrapper functions for pfil(9) hooks
191 */
192static int pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp,
193 int dir, struct inpcb *inp);
194static int pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp,
195 int dir, struct inpcb *inp);
196#ifdef INET6
197static int pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp,
198 int dir, struct inpcb *inp);
199static int pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp,
200 int dir, struct inpcb *inp);
201#endif
202
203static int hook_pf(void);
204static int dehook_pf(void);
205static int shutdown_pf(void);
206static int pf_load(void);
207static int pf_unload(void);
208
209static struct cdevsw pf_cdevsw = {
210 .d_ioctl = pfioctl,
211 .d_name = PF_NAME,
212 .d_version = D_VERSION,
213};
214
215static volatile int pf_pfil_hooked = 0;
216struct mtx pf_task_mtx;
217
218void
219init_pf_mutex(void)
220{
221 mtx_init(&pf_task_mtx, "pf task mtx", NULL, MTX_DEF);
222}
223
224void
225destroy_pf_mutex(void)
226{
227 mtx_destroy(&pf_task_mtx);
228}
229
230void
231init_zone_var(void)
232{
233 pf_src_tree_pl = pf_rule_pl = NULL;
234 pf_state_pl = pf_altq_pl = pf_pooladdr_pl = NULL;
235 pf_frent_pl = pf_frag_pl = pf_cache_pl = pf_cent_pl = NULL;
236 pf_state_scrub_pl = NULL;
237 pfr_ktable_pl = pfr_kentry_pl = NULL;
238}
239
240void
241cleanup_pf_zone(void)
242{
243 UMA_DESTROY(pf_src_tree_pl);
244 UMA_DESTROY(pf_rule_pl);
245 UMA_DESTROY(pf_state_pl);
246 UMA_DESTROY(pf_altq_pl);
247 UMA_DESTROY(pf_pooladdr_pl);
248 UMA_DESTROY(pf_frent_pl);
249 UMA_DESTROY(pf_frag_pl);
250 UMA_DESTROY(pf_cache_pl);
251 UMA_DESTROY(pf_cent_pl);
252 UMA_DESTROY(pfr_ktable_pl);
253 UMA_DESTROY(pfr_kentry_pl);
254 UMA_DESTROY(pf_state_scrub_pl);
255 UMA_DESTROY(pfi_addr_pl);
256}
257
258int
259pfattach(void)
260{
261 u_int32_t *my_timeout = pf_default_rule.timeout;
262 int error = 1;
263
264 do {
265 UMA_CREATE(pf_src_tree_pl,struct pf_src_node, "pfsrctrpl");
266 UMA_CREATE(pf_rule_pl, struct pf_rule, "pfrulepl");
267 UMA_CREATE(pf_state_pl, struct pf_state, "pfstatepl");
268 UMA_CREATE(pf_altq_pl, struct pf_altq, "pfaltqpl");
269 UMA_CREATE(pf_pooladdr_pl, struct pf_pooladdr, "pfpooladdrpl");
270 UMA_CREATE(pfr_ktable_pl, struct pfr_ktable, "pfrktable");
271 UMA_CREATE(pfr_kentry_pl, struct pfr_kentry, "pfrkentry");
272 UMA_CREATE(pfr_kentry_pl2, struct pfr_kentry, "pfrkentry2");
273 UMA_CREATE(pf_frent_pl, struct pf_frent, "pffrent");
274 UMA_CREATE(pf_frag_pl, struct pf_fragment, "pffrag");
275 UMA_CREATE(pf_cache_pl, struct pf_fragment, "pffrcache");
276 UMA_CREATE(pf_cent_pl, struct pf_frcache, "pffrcent");
277 UMA_CREATE(pf_state_scrub_pl, struct pf_state_scrub,
278 "pfstatescrub");
279 UMA_CREATE(pfi_addr_pl, struct pfi_dynaddr, "pfiaddrpl");
280 error = 0;
281 } while(0);
282 if (error) {
283 cleanup_pf_zone();
284 return (error);
285 }
286 pfr_initialize();
287 pfi_initialize();
288 if ( (error = pf_osfp_initialize()) ) {
289 cleanup_pf_zone();
290 pf_osfp_cleanup();
291 return (error);
292 }
293
294 pf_pool_limits[PF_LIMIT_STATES].pp = pf_state_pl;
295 pf_pool_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
296 pf_pool_limits[PF_LIMIT_SRC_NODES].pp = pf_src_tree_pl;
297 pf_pool_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
298 pf_pool_limits[PF_LIMIT_FRAGS].pp = pf_frent_pl;
299 pf_pool_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
300 uma_zone_set_max(pf_pool_limits[PF_LIMIT_STATES].pp,
301 pf_pool_limits[PF_LIMIT_STATES].limit);
302
303 RB_INIT(&tree_src_tracking);
304 RB_INIT(&pf_anchors);
305 pf_init_ruleset(&pf_main_ruleset);
306 TAILQ_INIT(&pf_altqs[0]);
307 TAILQ_INIT(&pf_altqs[1]);
308 TAILQ_INIT(&pf_pabuf);
309 pf_altqs_active = &pf_altqs[0];
310 pf_altqs_inactive = &pf_altqs[1];
311 TAILQ_INIT(&state_updates);
312
313 /* default rule should never be garbage collected */
314 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
315 pf_default_rule.action = PF_PASS;
316 pf_default_rule.nr = -1;
317
318 /* initialize default timeouts */
319 my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
320 my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
321 my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
322 my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
323 my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
324 my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
325 my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
326 my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
327 my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
328 my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
329 my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
330 my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
331 my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
332 my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
333 my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
334 my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
335 my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
336 my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
337
338 /*
339 * XXX
340 * The 2nd arg. 0 to callout_init(9) should be set to CALLOUT_MPSAFE
341 * if the Giant lock is removed from the network stack.
342 */
343 callout_init(&pf_expire_to, 0);
344 callout_reset(&pf_expire_to, my_timeout[PFTM_INTERVAL] * hz,
345 pf_purge_timeout, &pf_expire_to);
346
347 pf_normalize_init();
348 bzero(&pf_status, sizeof(pf_status));
349 pf_pfil_hooked = 0;
350
351 /* XXX do our best to avoid a conflict */
352 pf_status.hostid = arc4random();
353
354 return (error);
355}
356#else /* !__FreeBSD__ */
357void
358pfattach(int num)
359{
360 u_int32_t *timeout = pf_default_rule.timeout;
361
362 pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl",
363 &pool_allocator_nointr);
364 pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0,
365 "pfsrctrpl", NULL);
366 pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl",
367 NULL);
368 pool_init(&pf_altq_pl, sizeof(struct pf_altq), 0, 0, 0, "pfaltqpl",
369 &pool_allocator_nointr);
370 pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0,
371 "pfpooladdrpl", &pool_allocator_nointr);
372 pfr_initialize();
373 pfi_initialize();
374 pf_osfp_initialize();
375
376 pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp,
377 pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0);
378
379 RB_INIT(&tree_src_tracking);
380 RB_INIT(&pf_anchors);
381 pf_init_ruleset(&pf_main_ruleset);
382 TAILQ_INIT(&pf_altqs[0]);
383 TAILQ_INIT(&pf_altqs[1]);
384 TAILQ_INIT(&pf_pabuf);
385 pf_altqs_active = &pf_altqs[0];
386 pf_altqs_inactive = &pf_altqs[1];
387 TAILQ_INIT(&state_updates);
388
389 /* default rule should never be garbage collected */
390 pf_default_rule.entries.tqe_prev = &pf_default_rule.entries.tqe_next;
391 pf_default_rule.action = PF_PASS;
392 pf_default_rule.nr = -1;
393
394 /* initialize default timeouts */
395 timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
396 timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
397 timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
398 timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
399 timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
400 timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
401 timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
402 timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
403 timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
404 timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
405 timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
406 timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
407 timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
408 timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
409 timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
410 timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
411 timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
412 timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
413
414 timeout_set(&pf_expire_to, pf_purge_timeout, &pf_expire_to);
415 timeout_add(&pf_expire_to, timeout[PFTM_INTERVAL] * hz);
416
417 pf_normalize_init();
418 bzero(&pf_status, sizeof(pf_status));
419 pf_status.debug = PF_DEBUG_URGENT;
420
421 /* XXX do our best to avoid a conflict */
422 pf_status.hostid = arc4random();
423}
424
425int
426pfopen(struct cdev *dev, int flags, int fmt, struct proc *p)
427{
428 if (minor(dev) >= 1)
429 return (ENXIO);
430 return (0);
431}
432
433int
434pfclose(struct cdev *dev, int flags, int fmt, struct proc *p)
435{
436 if (minor(dev) >= 1)
437 return (ENXIO);
438 return (0);
439}
440#endif /* __FreeBSD__ */
441
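/*
 * Look up the address pool of the rule identified by anchor, action,
 * rule number and (optionally) ticket, searching either the active or
 * the inactive ruleset.
 */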
442struct pf_pool *
443pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action,
444 u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
445 u_int8_t check_ticket)
446{
447 struct pf_ruleset *ruleset;
448 struct pf_rule *rule;
449 int rs_num;
450
451 ruleset = pf_find_ruleset(anchor);
452 if (ruleset == NULL)
453 return (NULL);
454 rs_num = pf_get_ruleset_number(rule_action);
455 if (rs_num >= PF_RULESET_MAX)
456 return (NULL);
457 if (active) {
458 if (check_ticket && ticket !=
459 ruleset->rules[rs_num].active.ticket)
460 return (NULL);
461 if (r_last)
462 rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
463 pf_rulequeue);
464 else
465 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
466 } else {
467 if (check_ticket && ticket !=
468 ruleset->rules[rs_num].inactive.ticket)
469 return (NULL);
470 if (r_last)
471 rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
472 pf_rulequeue);
473 else
474 rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
475 }
476 if (!r_last) {
477 while ((rule != NULL) && (rule->nr != rule_number))
478 rule = TAILQ_NEXT(rule, entries);
479 }
480 if (rule == NULL)
481 return (NULL);
482
483 return (&rule->rpool);
484}
485
486int
487pf_get_ruleset_number(u_int8_t action)
488{
489 switch (action) {
490 case PF_SCRUB:
491 case PF_NOSCRUB:
492 return (PF_RULESET_SCRUB);
493 break;
494 case PF_PASS:
495 case PF_DROP:
496 return (PF_RULESET_FILTER);
497 break;
498 case PF_NAT:
499 case PF_NONAT:
500 return (PF_RULESET_NAT);
501 break;
502 case PF_BINAT:
503 case PF_NOBINAT:
504 return (PF_RULESET_BINAT);
505 break;
506 case PF_RDR:
507 case PF_NORDR:
508 return (PF_RULESET_RDR);
509 break;
510 default:
511 return (PF_RULESET_MAX);
512 break;
513 }
514}
515
516void
517pf_init_ruleset(struct pf_ruleset *ruleset)
518{
519 int i;
520
521 memset(ruleset, 0, sizeof(struct pf_ruleset));
522 for (i = 0; i < PF_RULESET_MAX; i++) {
523 TAILQ_INIT(&ruleset->rules[i].queues[0]);
524 TAILQ_INIT(&ruleset->rules[i].queues[1]);
525 ruleset->rules[i].active.ptr = &ruleset->rules[i].queues[0];
526 ruleset->rules[i].inactive.ptr = &ruleset->rules[i].queues[1];
527 }
528}
529
530struct pf_anchor *
531pf_find_anchor(const char *path)
532{
533 static struct pf_anchor key;
534
535 memset(&key, 0, sizeof(key));
536 strlcpy(key.path, path, sizeof(key.path));
537 return (RB_FIND(pf_anchor_global, &pf_anchors, &key));
538}
539
540struct pf_ruleset *
541pf_find_ruleset(const char *path)
542{
543 struct pf_anchor *anchor;
544
545 while (*path == '/')
546 path++;
547 if (!*path)
548 return (&pf_main_ruleset);
549 anchor = pf_find_anchor(path);
550 if (anchor == NULL)
551 return (NULL);
552 else
553 return (&anchor->ruleset);
554}
555
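/*
 * Walk the given anchor path component by component, creating any
 * anchors that do not exist yet (linking them into the global tree and
 * the parent's children tree), and return the ruleset of the final
 * component.
 */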
556struct pf_ruleset *
557pf_find_or_create_ruleset(const char *path)
558{
559 static char p[MAXPATHLEN];
560 char *q = NULL, *r; /* make the compiler happy */
561 struct pf_ruleset *ruleset;
562 struct pf_anchor *anchor = NULL, *dup, *parent = NULL;
563
564 while (*path == '/')
565 path++;
566 ruleset = pf_find_ruleset(path);
567 if (ruleset != NULL)
568 return (ruleset);
569 strlcpy(p, path, sizeof(p));
570#ifdef __FreeBSD__
571 while (parent == NULL && (q = rindex(p, '/')) != NULL) {
572#else
573 while (parent == NULL && (q = strrchr(p, '/')) != NULL) {
574#endif
575 *q = 0;
576 if ((ruleset = pf_find_ruleset(p)) != NULL) {
577 parent = ruleset->anchor;
578 break;
579 }
580 }
581 if (q == NULL)
582 q = p;
583 else
584 q++;
585 strlcpy(p, path, sizeof(p));
586 if (!*q)
587 return (NULL);
588#ifdef __FreeBSD__
589 while ((r = index(q, '/')) != NULL || *q) {
590#else
591 while ((r = strchr(q, '/')) != NULL || *q) {
592#endif
593 if (r != NULL)
594 *r = 0;
595 if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE ||
596 (parent != NULL && strlen(parent->path) >=
597 MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1))
598 return (NULL);
599 anchor = (struct pf_anchor *)malloc(sizeof(*anchor), M_TEMP,
600 M_NOWAIT);
601 if (anchor == NULL)
602 return (NULL);
603 memset(anchor, 0, sizeof(*anchor));
604 RB_INIT(&anchor->children);
605 strlcpy(anchor->name, q, sizeof(anchor->name));
606 if (parent != NULL) {
607 strlcpy(anchor->path, parent->path,
608 sizeof(anchor->path));
609 strlcat(anchor->path, "/", sizeof(anchor->path));
610 }
611 strlcat(anchor->path, anchor->name, sizeof(anchor->path));
612 if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) !=
613 NULL) {
614 printf("pf_find_or_create_ruleset: RB_INSERT1 "
615 "'%s' '%s' collides with '%s' '%s'\n",
616 anchor->path, anchor->name, dup->path, dup->name);
617 free(anchor, M_TEMP);
618 return (NULL);
619 }
620 if (parent != NULL) {
621 anchor->parent = parent;
622 if ((dup = RB_INSERT(pf_anchor_node, &parent->children,
623 anchor)) != NULL) {
624 printf("pf_find_or_create_ruleset: "
625 "RB_INSERT2 '%s' '%s' collides with "
626 "'%s' '%s'\n", anchor->path, anchor->name,
627 dup->path, dup->name);
628 RB_REMOVE(pf_anchor_global, &pf_anchors,
629 anchor);
630 free(anchor, M_TEMP);
631 return (NULL);
632 }
633 }
634 pf_init_ruleset(&anchor->ruleset);
635 anchor->ruleset.anchor = anchor;
636 parent = anchor;
637 if (r != NULL)
638 q = r + 1;
639 else
640 *q = 0;
641 }
642 return (&anchor->ruleset);
643}
644
645void
646pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset)
647{
648 struct pf_anchor *parent;
649 int i;
650
651 while (ruleset != NULL) {
652 if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL ||
653 !RB_EMPTY(&ruleset->anchor->children) ||
654 ruleset->anchor->refcnt > 0 || ruleset->tables > 0 ||
655 ruleset->topen)
656 return;
657 for (i = 0; i < PF_RULESET_MAX; ++i)
658 if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) ||
659 !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) ||
660 ruleset->rules[i].inactive.open)
661 return;
662 RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor);
663 if ((parent = ruleset->anchor->parent) != NULL)
664 RB_REMOVE(pf_anchor_node, &parent->children,
665 ruleset->anchor);
666 free(ruleset->anchor, M_TEMP);
667 if (parent == NULL)
668 return;
669 ruleset = &parent->ruleset;
670 }
671}
672
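/*
 * Resolve the anchor path named by a rule: an absolute path starts with
 * '/', a relative path is taken from the containing ruleset with each
 * leading "../" stripping one component, and a final component of "*"
 * marks a wildcard anchor.
 */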
673int
674pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s,
675 const char *name)
676{
677 static char *p, path[MAXPATHLEN];
678 struct pf_ruleset *ruleset;
679
680 r->anchor = NULL;
681 r->anchor_relative = 0;
682 r->anchor_wildcard = 0;
683 if (!name[0])
684 return (0);
685 if (name[0] == '/')
686 strlcpy(path, name + 1, sizeof(path));
687 else {
688 /* relative path */
689 r->anchor_relative = 1;
690 if (s->anchor == NULL || !s->anchor->path[0])
691 path[0] = 0;
692 else
693 strlcpy(path, s->anchor->path, sizeof(path));
694 while (name[0] == '.' && name[1] == '.' && name[2] == '/') {
695 if (!path[0]) {
696 printf("pf_anchor_setup: .. beyond root\n");
697 return (1);
698 }
699#ifdef __FreeBSD__
700 if ((p = rindex(path, '/')) != NULL)
701#else
702 if ((p = strrchr(path, '/')) != NULL)
703#endif
704 *p = 0;
705 else
706 path[0] = 0;
707 r->anchor_relative++;
708 name += 3;
709 }
710 if (path[0])
711 strlcat(path, "/", sizeof(path));
712 strlcat(path, name, sizeof(path));
713 }
714#ifdef __FreeBSD__
715 if ((p = rindex(path, '/')) != NULL && !strcmp(p, "/*")) {
716#else
717 if ((p = strrchr(path, '/')) != NULL && !strcmp(p, "/*")) {
718#endif
719 r->anchor_wildcard = 1;
720 *p = 0;
721 }
722 ruleset = pf_find_or_create_ruleset(path);
723 if (ruleset == NULL || ruleset->anchor == NULL) {
724 printf("pf_anchor_setup: ruleset\n");
725 return (1);
726 }
727 r->anchor = ruleset->anchor;
728 r->anchor->refcnt++;
729 return (0);
730}
731
732int
733pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r,
734 struct pfioc_rule *pr)
735{
736 pr->anchor_call[0] = 0;
737 if (r->anchor == NULL)
738 return (0);
739 if (!r->anchor_relative) {
740 strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call));
741 strlcat(pr->anchor_call, r->anchor->path,
742 sizeof(pr->anchor_call));
743 } else {
744 char a[MAXPATHLEN], b[MAXPATHLEN], *p;
745 int i;
746
747 if (rs->anchor == NULL)
748 a[0] = 0;
749 else
750 strlcpy(a, rs->anchor->path, sizeof(a));
751 strlcpy(b, r->anchor->path, sizeof(b));
752 for (i = 1; i < r->anchor_relative; ++i) {
753#ifdef __FreeBSD__
754 if ((p = rindex(a, '/')) == NULL)
755#else
756 if ((p = strrchr(a, '/')) == NULL)
757#endif
758 p = a;
759 *p = 0;
760 strlcat(pr->anchor_call, "../",
761 sizeof(pr->anchor_call));
762 }
763 if (strncmp(a, b, strlen(a))) {
764 printf("pf_anchor_copyout: '%s' '%s'\n", a, b);
765 return (1);
766 }
767 if (strlen(b) > strlen(a))
768 strlcat(pr->anchor_call, b + (a[0] ? strlen(a) + 1 : 0),
769 sizeof(pr->anchor_call));
770 }
771 if (r->anchor_wildcard)
772 strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*",
773 sizeof(pr->anchor_call));
774 return (0);
775}
776
777void
778pf_anchor_remove(struct pf_rule *r)
779{
780 if (r->anchor == NULL)
781 return;
782 if (r->anchor->refcnt <= 0) {
783 printf("pf_anchor_remove: broken refcount");
784 r->anchor = NULL;
785 return;
786 }
787 if (!--r->anchor->refcnt)
788 pf_remove_if_empty_ruleset(&r->anchor->ruleset);
789 r->anchor = NULL;
790}
791
792void
793pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb)
794{
795 struct pf_pooladdr *mv_pool_pa;
796
797 while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
798 TAILQ_REMOVE(poola, mv_pool_pa, entries);
799 TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
800 }
801}
802
803void
804pf_empty_pool(struct pf_palist *poola)
805{
806 struct pf_pooladdr *empty_pool_pa;
807
808 while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) {
809 pfi_dynaddr_remove(&empty_pool_pa->addr);
810 pf_tbladdr_remove(&empty_pool_pa->addr);
811 pfi_detach_rule(empty_pool_pa->kif);
812 TAILQ_REMOVE(poola, empty_pool_pa, entries);
813 pool_put(&pf_pooladdr_pl, empty_pool_pa);
814 }
815}
816
817void
818pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule)
819{
820 if (rulequeue != NULL) {
821 if (rule->states <= 0) {
822 /*
823 * XXX - we need to remove the table *before* detaching
824 * the rule to make sure the table code does not delete
825 * the anchor under our feet.
826 */
827 pf_tbladdr_remove(&rule->src.addr);
828 pf_tbladdr_remove(&rule->dst.addr);
829 if (rule->overload_tbl)
830 pfr_detach_table(rule->overload_tbl);
831 }
832 TAILQ_REMOVE(rulequeue, rule, entries);
833 rule->entries.tqe_prev = NULL;
834 rule->nr = -1;
835 }
836
837 if (rule->states > 0 || rule->src_nodes > 0 ||
838 rule->entries.tqe_prev != NULL)
839 return;
840 pf_tag_unref(rule->tag);
841 pf_tag_unref(rule->match_tag);
842#ifdef ALTQ
843 if (rule->pqid != rule->qid)
844 pf_qid_unref(rule->pqid);
845 pf_qid_unref(rule->qid);
846#endif
847 pf_rtlabel_remove(&rule->src.addr);
848 pf_rtlabel_remove(&rule->dst.addr);
849 pfi_dynaddr_remove(&rule->src.addr);
850 pfi_dynaddr_remove(&rule->dst.addr);
851 if (rulequeue == NULL) {
852 pf_tbladdr_remove(&rule->src.addr);
853 pf_tbladdr_remove(&rule->dst.addr);
854 if (rule->overload_tbl)
855 pfr_detach_table(rule->overload_tbl);
856 }
857 pfi_detach_rule(rule->kif);
858 pf_anchor_remove(rule);
859 pf_empty_pool(&rule->rpool.list);
860 pool_put(&pf_rule_pl, rule);
861}
862
863static u_int16_t
864tagname2tag(struct pf_tags *head, char *tagname)
865{
866 struct pf_tagname *tag, *p = NULL;
867 u_int16_t new_tagid = 1;
868
869 TAILQ_FOREACH(tag, head, entries)
870 if (strcmp(tagname, tag->name) == 0) {
871 tag->ref++;
872 return (tag->tag);
873 }
874
875 /*
876 * to avoid fragmentation, we do a linear search from the beginning
877 * and take the first free slot we find. if there is none or the list
878 * is empty, append a new entry at the end.
879 */
880
881 /* new entry */
882 if (!TAILQ_EMPTY(head))
883 for (p = TAILQ_FIRST(head); p != NULL &&
884 p->tag == new_tagid; p = TAILQ_NEXT(p, entries))
885 new_tagid = p->tag + 1;
886
887 if (new_tagid > TAGID_MAX)
888 return (0);
889
890 /* allocate and fill new struct pf_tagname */
891 tag = (struct pf_tagname *)malloc(sizeof(struct pf_tagname),
892 M_TEMP, M_NOWAIT);
893 if (tag == NULL)
894 return (0);
895 bzero(tag, sizeof(struct pf_tagname));
896 strlcpy(tag->name, tagname, sizeof(tag->name));
897 tag->tag = new_tagid;
898 tag->ref++;
899
900 if (p != NULL) /* insert new entry before p */
901 TAILQ_INSERT_BEFORE(p, tag, entries);
902 else /* either list empty or no free slot in between */
903 TAILQ_INSERT_TAIL(head, tag, entries);
904
905 return (tag->tag);
906}
907
908static void
909tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p)
910{
911 struct pf_tagname *tag;
912
913 TAILQ_FOREACH(tag, head, entries)
914 if (tag->tag == tagid) {
915 strlcpy(p, tag->name, PF_TAG_NAME_SIZE);
916 return;
917 }
918}
919
920static void
921tag_unref(struct pf_tags *head, u_int16_t tag)
922{
923 struct pf_tagname *p, *next;
924
925 if (tag == 0)
926 return;
927
928 for (p = TAILQ_FIRST(head); p != NULL; p = next) {
929 next = TAILQ_NEXT(p, entries);
930 if (tag == p->tag) {
931 if (--p->ref == 0) {
932 TAILQ_REMOVE(head, p, entries);
933 free(p, M_TEMP);
934 }
935 break;
936 }
937 }
938}
939
940u_int16_t
941pf_tagname2tag(char *tagname)
942{
943 return (tagname2tag(&pf_tags, tagname));
944}
945
946void
947pf_tag2tagname(u_int16_t tagid, char *p)
948{
949 return (tag2tagname(&pf_tags, tagid, p));
950}
951
952void
953pf_tag_ref(u_int16_t tag)
954{
955 struct pf_tagname *t;
956
957 TAILQ_FOREACH(t, &pf_tags, entries)
958 if (t->tag == tag)
959 break;
960 if (t != NULL)
961 t->ref++;
962}
963
964void
965pf_tag_unref(u_int16_t tag)
966{
967 return (tag_unref(&pf_tags, tag));
968}
969
970int
971pf_rtlabel_add(struct pf_addr_wrap *a)
972{
973#ifdef __FreeBSD__
974 /* XXX_IMPORT: later */
975 return (0);
976#else
977 if (a->type == PF_ADDR_RTLABEL &&
978 (a->v.rtlabel = rtlabel_name2id(a->v.rtlabelname)) == 0)
979 return (-1);
980 return (0);
981#endif
982}
983
984void
985pf_rtlabel_remove(struct pf_addr_wrap *a)
986{
987#ifdef __FreeBSD__
988 /* XXX_IMPORT: later */
989#else
990 if (a->type == PF_ADDR_RTLABEL)
991 rtlabel_unref(a->v.rtlabel);
992#endif
993}
994
995void
996pf_rtlabel_copyout(struct pf_addr_wrap *a)
997{
998#ifdef __FreeBSD__
999 /* XXX_IMPORT: later */
1000 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel)
1001 strlcpy(a->v.rtlabelname, "?", sizeof(a->v.rtlabelname));
1002#else
1003 const char *name;
1004
1005 if (a->type == PF_ADDR_RTLABEL && a->v.rtlabel) {
1006 if ((name = rtlabel_id2name(a->v.rtlabel)) == NULL)
1007 strlcpy(a->v.rtlabelname, "?",
1008 sizeof(a->v.rtlabelname));
1009 else
1010 strlcpy(a->v.rtlabelname, name,
1011 sizeof(a->v.rtlabelname));
1012 }
1013#endif
1014}
1015
1016#ifdef ALTQ
1017u_int32_t
1018pf_qname2qid(char *qname)
1019{
1020 return ((u_int32_t)tagname2tag(&pf_qids, qname));
1021}
1022
1023void
1024pf_qid2qname(u_int32_t qid, char *p)
1025{
1026 return (tag2tagname(&pf_qids, (u_int16_t)qid, p));
1027}
1028
1029void
1030pf_qid_unref(u_int32_t qid)
1031{
1032 return (tag_unref(&pf_qids, (u_int16_t)qid));
1033}
1034
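/*
 * ALTQ transactions mirror the ruleset transactions further below:
 * pf_begin_altq purges the inactive altq list and opens a ticket,
 * pf_rollback_altq discards a pending transaction, and pf_commit_altq
 * swaps the lists and attaches/detaches the queueing disciplines.
 */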
1035int
1036pf_begin_altq(u_int32_t *ticket)
1037{
1038 struct pf_altq *altq;
1039 int error = 0;
1040
1041 /* Purge the old altq list */
1042 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1043 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1044 if (altq->qname[0] == 0) {
1045 /* detach and destroy the discipline */
1046 error = altq_remove(altq);
1047 } else
1048 pf_qid_unref(altq->qid);
1049 pool_put(&pf_altq_pl, altq);
1050 }
1051 if (error)
1052 return (error);
1053 *ticket = ++ticket_altqs_inactive;
1054 altqs_inactive_open = 1;
1055 return (0);
1056}
1057
1058int
1059pf_rollback_altq(u_int32_t ticket)
1060{
1061 struct pf_altq *altq;
1062 int error = 0;
1063
1064 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
1065 return (0);
1066 /* Purge the old altq list */
1067 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1068 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1069 if (altq->qname[0] == 0) {
1070 /* detach and destroy the discipline */
1071 error = altq_remove(altq);
1072 } else
1073 pf_qid_unref(altq->qid);
1074 pool_put(&pf_altq_pl, altq);
1075 }
1076 altqs_inactive_open = 0;
1077 return (error);
1078}
1079
1080int
1081pf_commit_altq(u_int32_t ticket)
1082{
1083 struct pf_altqqueue *old_altqs;
1084 struct pf_altq *altq;
1085 int s, err, error = 0;
1086
1087 if (!altqs_inactive_open || ticket != ticket_altqs_inactive)
1088 return (EBUSY);
1089
1090 /* swap altqs, keep the old. */
1091 s = splsoftnet();
1092 old_altqs = pf_altqs_active;
1093 pf_altqs_active = pf_altqs_inactive;
1094 pf_altqs_inactive = old_altqs;
1095 ticket_altqs_active = ticket_altqs_inactive;
1096
1097 /* Attach new disciplines */
1098 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
1099 if (altq->qname[0] == 0) {
1100 /* attach the discipline */
1101 error = altq_pfattach(altq);
1102 if (error == 0 && pf_altq_running)
1103 error = pf_enable_altq(altq);
1104 if (error != 0) {
1105 splx(s);
1106 return (error);
1107 }
1108 }
1109 }
1110
1111 /* Purge the old altq list */
1112 while ((altq = TAILQ_FIRST(pf_altqs_inactive)) != NULL) {
1113 TAILQ_REMOVE(pf_altqs_inactive, altq, entries);
1114 if (altq->qname[0] == 0) {
1115 /* detach and destroy the discipline */
1116 if (pf_altq_running)
1117 error = pf_disable_altq(altq);
1118 err = altq_pfdetach(altq);
1119 if (err != 0 && error == 0)
1120 error = err;
1121 err = altq_remove(altq);
1122 if (err != 0 && error == 0)
1123 error = err;
1124 } else
1125 pf_qid_unref(altq->qid);
1126 pool_put(&pf_altq_pl, altq);
1127 }
1128 splx(s);
1129
1130 altqs_inactive_open = 0;
1131 return (error);
1132}
1133
1134int
1135pf_enable_altq(struct pf_altq *altq)
1136{
1137 struct ifnet *ifp;
1138 struct tb_profile tb;
1139 int s, error = 0;
1140
1141 if ((ifp = ifunit(altq->ifname)) == NULL)
1142 return (EINVAL);
1143
1144 if (ifp->if_snd.altq_type != ALTQT_NONE)
1145 error = altq_enable(&ifp->if_snd);
1146
1147 /* set tokenbucket regulator */
1148 if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1149 tb.rate = altq->ifbandwidth;
1150 tb.depth = altq->tbrsize;
1151 s = splimp();
1152#ifdef __FreeBSD__
1153 PF_UNLOCK();
1154#endif
1155 error = tbr_set(&ifp->if_snd, &tb);
1156#ifdef __FreeBSD__
1157 PF_LOCK();
1158#endif
1159 splx(s);
1160 }
1161
1162 return (error);
1163}
1164
1165int
1166pf_disable_altq(struct pf_altq *altq)
1167{
1168 struct ifnet *ifp;
1169 struct tb_profile tb;
1170 int s, error;
1171
1172 if ((ifp = ifunit(altq->ifname)) == NULL)
1173 return (EINVAL);
1174
1175 /*
1176 * when the discipline is no longer referenced, it has been overridden
1177 * by a new one; in that case, just return.
1178 */
1179 if (altq->altq_disc != ifp->if_snd.altq_disc)
1180 return (0);
1181
1182 error = altq_disable(&ifp->if_snd);
1183
1184 if (error == 0) {
1185 /* clear tokenbucket regulator */
1186 tb.rate = 0;
1187 s = splimp();
1188#ifdef __FreeBSD__
1189 PF_UNLOCK();
1190#endif
1191 error = tbr_set(&ifp->if_snd, &tb);
1192#ifdef __FreeBSD__
1193 PF_LOCK();
1194#endif
1195 splx(s);
1196 }
1197
1198 return (error);
1199}
1200#endif /* ALTQ */
1201
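/*
 * Ruleset transactions: pf_begin_rules flushes the inactive ruleset and
 * hands out a ticket, pf_rollback_rules discards a pending transaction,
 * and pf_commit_rules swaps the inactive and active rulesets at
 * splsoftnet and purges the old rules.
 */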
1202int
1203pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1204{
1205 struct pf_ruleset *rs;
1206 struct pf_rule *rule;
1207
1208 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1209 return (EINVAL);
1210 rs = pf_find_or_create_ruleset(anchor);
1211 if (rs == NULL)
1212 return (EINVAL);
1213 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1214 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1215 *ticket = ++rs->rules[rs_num].inactive.ticket;
1216 rs->rules[rs_num].inactive.open = 1;
1217 return (0);
1218}
1219
1220int
1221pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1222{
1223 struct pf_ruleset *rs;
1224 struct pf_rule *rule;
1225
1226 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1227 return (EINVAL);
1228 rs = pf_find_ruleset(anchor);
1229 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1230 rs->rules[rs_num].inactive.ticket != ticket)
1231 return (0);
1232 while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL)
1233 pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule);
1234 rs->rules[rs_num].inactive.open = 0;
1235 return (0);
1236}
1237
1238int
1239pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1240{
1241 struct pf_ruleset *rs;
1242 struct pf_rule *rule;
1243 struct pf_rulequeue *old_rules;
1244 int s;
1245
1246 if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1247 return (EINVAL);
1248 rs = pf_find_ruleset(anchor);
1249 if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1250 ticket != rs->rules[rs_num].inactive.ticket)
1251 return (EBUSY);
1252
1253 /* Swap rules, keep the old. */
1254 s = splsoftnet();
1255 old_rules = rs->rules[rs_num].active.ptr;
1256 rs->rules[rs_num].active.ptr =
1257 rs->rules[rs_num].inactive.ptr;
1258 rs->rules[rs_num].inactive.ptr = old_rules;
1259 rs->rules[rs_num].active.ticket =
1260 rs->rules[rs_num].inactive.ticket;
1261 pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1262
1263 /* Purge the old rule list. */
1264 while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1265 pf_rm_rule(old_rules, rule);
1266 rs->rules[rs_num].inactive.open = 0;
1267 pf_remove_if_empty_ruleset(rs);
1268 splx(s);
1269 return (0);
1270}
1271
1272#ifdef __FreeBSD__
1273int
1274pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
1275#else
1276int
1277pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct proc *p)
1278#endif
1279{
1280 struct pf_pooladdr *pa = NULL;
1281 struct pf_pool *pool = NULL;
1282#ifndef __FreeBSD__
1283 int s;
1284#endif
1285 int error = 0;
1286
1287 /* XXX keep in sync with switch() below */
1288#ifdef __FreeBSD__
1289 if (securelevel_gt(td->td_ucred, 2))
1290#else
1291 if (securelevel > 1)
1292#endif
1293 switch (cmd) {
1294 case DIOCGETRULES:
1295 case DIOCGETRULE:
1296 case DIOCGETADDRS:
1297 case DIOCGETADDR:
1298 case DIOCGETSTATE:
1299 case DIOCSETSTATUSIF:
1300 case DIOCGETSTATUS:
1301 case DIOCCLRSTATUS:
1302 case DIOCNATLOOK:
1303 case DIOCSETDEBUG:
1304 case DIOCGETSTATES:
1305 case DIOCGETTIMEOUT:
1306 case DIOCCLRRULECTRS:
1307 case DIOCGETLIMIT:
1308 case DIOCGETALTQS:
1309 case DIOCGETALTQ:
1310 case DIOCGETQSTATS:
1311 case DIOCGETRULESETS:
1312 case DIOCGETRULESET:
1313 case DIOCRGETTABLES:
1314 case DIOCRGETTSTATS:
1315 case DIOCRCLRTSTATS:
1316 case DIOCRCLRADDRS:
1317 case DIOCRADDADDRS:
1318 case DIOCRDELADDRS:
1319 case DIOCRSETADDRS:
1320 case DIOCRGETADDRS:
1321 case DIOCRGETASTATS:
1322 case DIOCRCLRASTATS:
1323 case DIOCRTSTADDRS:
1324 case DIOCOSFPGET:
1325 case DIOCGETSRCNODES:
1326 case DIOCCLRSRCNODES:
1327 case DIOCIGETIFACES:
1328 case DIOCICLRISTATS:
1329#ifdef __FreeBSD__
1330 case DIOCGIFSPEED:
1331#endif
1332 case DIOCSETIFFLAG:
1333 case DIOCCLRIFFLAG:
1334 break;
1335 case DIOCRCLRTABLES:
1336 case DIOCRADDTABLES:
1337 case DIOCRDELTABLES:
1338 case DIOCRSETTFLAGS:
1339 if (((struct pfioc_table *)addr)->pfrio_flags &
1340 PFR_FLAG_DUMMY)
1341 break; /* dummy operation ok */
1342 return (EPERM);
1343 default:
1344 return (EPERM);
1345 }
1346
1347 if (!(flags & FWRITE))
1348 switch (cmd) {
1349 case DIOCGETRULES:
1350 case DIOCGETRULE:
1351 case DIOCGETADDRS:
1352 case DIOCGETADDR:
1353 case DIOCGETSTATE:
1354 case DIOCGETSTATUS:
1355 case DIOCGETSTATES:
1356 case DIOCGETTIMEOUT:
1357 case DIOCGETLIMIT:
1358 case DIOCGETALTQS:
1359 case DIOCGETALTQ:
1360 case DIOCGETQSTATS:
1361 case DIOCGETRULESETS:
1362 case DIOCGETRULESET:
1363 case DIOCRGETTABLES:
1364 case DIOCRGETTSTATS:
1365 case DIOCRGETADDRS:
1366 case DIOCRGETASTATS:
1367 case DIOCRTSTADDRS:
1368 case DIOCOSFPGET:
1369 case DIOCGETSRCNODES:
1370 case DIOCIGETIFACES:
1371#ifdef __FreeBSD__
1372 case DIOCGIFSPEED:
1373#endif
1374 break;
1375 case DIOCRCLRTABLES:
1376 case DIOCRADDTABLES:
1377 case DIOCRDELTABLES:
1378 case DIOCRCLRTSTATS:
1379 case DIOCRCLRADDRS:
1380 case DIOCRADDADDRS:
1381 case DIOCRDELADDRS:
1382 case DIOCRSETADDRS:
1383 case DIOCRSETTFLAGS:
1384 if (((struct pfioc_table *)addr)->pfrio_flags &
1385 PFR_FLAG_DUMMY)
1386 break; /* dummy operation ok */
1387 return (EACCES);
1388 default:
1389 return (EACCES);
1390 }
1391
1392#ifdef __FreeBSD__
1393 PF_LOCK();
1394#else
1395 s = splsoftnet();
1396#endif
1397 switch (cmd) {
1398
1399 case DIOCSTART:
1400 if (pf_status.running)
1401 error = EEXIST;
1402 else {
1403#ifdef __FreeBSD__
1404 PF_UNLOCK();
1405 error = hook_pf();
1406 PF_LOCK();
1407 if (error) {
1408 DPFPRINTF(PF_DEBUG_MISC,
1409			    ("pf: pfil registration failed\n"));
1410 break;
1411 }
1412#endif
1413 pf_status.running = 1;
1414 pf_status.since = time_second;
1415 if (pf_status.stateid == 0) {
1416 pf_status.stateid = time_second;
1417 pf_status.stateid = pf_status.stateid << 32;
1418 }
1419 DPFPRINTF(PF_DEBUG_MISC, ("pf: started\n"));
1420 }
1421 break;
1422
1423 case DIOCSTOP:
1424 if (!pf_status.running)
1425 error = ENOENT;
1426 else {
1427 pf_status.running = 0;
1428#ifdef __FreeBSD__
1429 PF_UNLOCK();
1430 error = dehook_pf();
1431 PF_LOCK();
1432 if (error) {
1433 pf_status.running = 1;
1434 DPFPRINTF(PF_DEBUG_MISC,
1435				    ("pf: pfil unregistration failed\n"));
1436 }
1437#endif
1438 pf_status.since = time_second;
1439 DPFPRINTF(PF_DEBUG_MISC, ("pf: stopped\n"));
1440 }
1441 break;
1442
1443 case DIOCADDRULE: {
1444 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1445 struct pf_ruleset *ruleset;
1446 struct pf_rule *rule, *tail;
1447 struct pf_pooladdr *pa;
1448 int rs_num;
1449
1450 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1451 ruleset = pf_find_ruleset(pr->anchor);
1452 if (ruleset == NULL) {
1453 error = EINVAL;
1454 break;
1455 }
1456 rs_num = pf_get_ruleset_number(pr->rule.action);
1457 if (rs_num >= PF_RULESET_MAX) {
1458 error = EINVAL;
1459 break;
1460 }
1461 if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1462 error = EINVAL;
1463 break;
1464 }
1465 if (pr->ticket != ruleset->rules[rs_num].inactive.ticket) {
1466 printf("ticket: %d != [%d]%d\n", pr->ticket,
1467 rs_num, ruleset->rules[rs_num].inactive.ticket);
1468 error = EBUSY;
1469 break;
1470 }
1471 if (pr->pool_ticket != ticket_pabuf) {
1472 printf("pool_ticket: %d != %d\n", pr->pool_ticket,
1473 ticket_pabuf);
1474 error = EBUSY;
1475 break;
1476 }
1477 rule = pool_get(&pf_rule_pl, PR_NOWAIT);
1478 if (rule == NULL) {
1479 error = ENOMEM;
1480 break;
1481 }
1482 bcopy(&pr->rule, rule, sizeof(struct pf_rule));
1483 rule->anchor = NULL;
1484 rule->kif = NULL;
1485 TAILQ_INIT(&rule->rpool.list);
1486 /* initialize refcounting */
1487 rule->states = 0;
1488 rule->src_nodes = 0;
1489 rule->entries.tqe_prev = NULL;
1490#ifndef INET
1491 if (rule->af == AF_INET) {
1492 pool_put(&pf_rule_pl, rule);
1493 error = EAFNOSUPPORT;
1494 break;
1495 }
1496#endif /* INET */
1497#ifndef INET6
1498 if (rule->af == AF_INET6) {
1499 pool_put(&pf_rule_pl, rule);
1500 error = EAFNOSUPPORT;
1501 break;
1502 }
1503#endif /* INET6 */
1504 tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
1505 pf_rulequeue);
1506 if (tail)
1507 rule->nr = tail->nr + 1;
1508 else
1509 rule->nr = 0;
1510 if (rule->ifname[0]) {
1511 rule->kif = pfi_attach_rule(rule->ifname);
1512 if (rule->kif == NULL) {
1513 pool_put(&pf_rule_pl, rule);
1514 error = EINVAL;
1515 break;
1516 }
1517 }
1518
1519#ifdef ALTQ
1520 /* set queue IDs */
1521 if (rule->qname[0] != 0) {
1522 if ((rule->qid = pf_qname2qid(rule->qname)) == 0)
1523 error = EBUSY;
1524 else if (rule->pqname[0] != 0) {
1525 if ((rule->pqid =
1526 pf_qname2qid(rule->pqname)) == 0)
1527 error = EBUSY;
1528 } else
1529 rule->pqid = rule->qid;
1530 }
1531#endif
1532 if (rule->tagname[0])
1533 if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
1534 error = EBUSY;
1535 if (rule->match_tagname[0])
1536 if ((rule->match_tag =
1537 pf_tagname2tag(rule->match_tagname)) == 0)
1538 error = EBUSY;
1539 if (rule->rt && !rule->direction)
1540 error = EINVAL;
1541 if (pf_rtlabel_add(&rule->src.addr) ||
1542 pf_rtlabel_add(&rule->dst.addr))
1543 error = EBUSY;
1544 if (pfi_dynaddr_setup(&rule->src.addr, rule->af))
1545 error = EINVAL;
1546 if (pfi_dynaddr_setup(&rule->dst.addr, rule->af))
1547 error = EINVAL;
1548 if (pf_tbladdr_setup(ruleset, &rule->src.addr))
1549 error = EINVAL;
1550 if (pf_tbladdr_setup(ruleset, &rule->dst.addr))
1551 error = EINVAL;
1552 if (pf_anchor_setup(rule, ruleset, pr->anchor_call))
1553 error = EINVAL;
1554 TAILQ_FOREACH(pa, &pf_pabuf, entries)
1555 if (pf_tbladdr_setup(ruleset, &pa->addr))
1556 error = EINVAL;
1557
1558 if (rule->overload_tblname[0]) {
1559 if ((rule->overload_tbl = pfr_attach_table(ruleset,
1560 rule->overload_tblname)) == NULL)
1561 error = EINVAL;
1562 else
1563 rule->overload_tbl->pfrkt_flags |=
1564 PFR_TFLAG_ACTIVE;
1565 }
1566
1567 pf_mv_pool(&pf_pabuf, &rule->rpool.list);
1568 if (((((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
1569 (rule->action == PF_BINAT)) && rule->anchor == NULL) ||
1570 (rule->rt > PF_FASTROUTE)) &&
1571 (TAILQ_FIRST(&rule->rpool.list) == NULL))
1572 error = EINVAL;
1573
1574 if (error) {
1575 pf_rm_rule(NULL, rule);
1576 break;
1577 }
1578 rule->rpool.cur = TAILQ_FIRST(&rule->rpool.list);
1579 rule->evaluations = rule->packets = rule->bytes = 0;
1580 TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
1581 rule, entries);
1582 break;
1583 }
1584
1585 case DIOCGETRULES: {
1586 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1587 struct pf_ruleset *ruleset;
1588 struct pf_rule *tail;
1589 int rs_num;
1590
1591 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1592 ruleset = pf_find_ruleset(pr->anchor);
1593 if (ruleset == NULL) {
1594 error = EINVAL;
1595 break;
1596 }
1597 rs_num = pf_get_ruleset_number(pr->rule.action);
1598 if (rs_num >= PF_RULESET_MAX) {
1599 error = EINVAL;
1600 break;
1601 }
1602 tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
1603 pf_rulequeue);
1604 if (tail)
1605 pr->nr = tail->nr + 1;
1606 else
1607 pr->nr = 0;
1608 pr->ticket = ruleset->rules[rs_num].active.ticket;
1609 break;
1610 }
1611
1612 case DIOCGETRULE: {
1613 struct pfioc_rule *pr = (struct pfioc_rule *)addr;
1614 struct pf_ruleset *ruleset;
1615 struct pf_rule *rule;
1616 int rs_num, i;
1617
1618 pr->anchor[sizeof(pr->anchor) - 1] = 0;
1619 ruleset = pf_find_ruleset(pr->anchor);
1620 if (ruleset == NULL) {
1621 error = EINVAL;
1622 break;
1623 }
1624 rs_num = pf_get_ruleset_number(pr->rule.action);
1625 if (rs_num >= PF_RULESET_MAX) {
1626 error = EINVAL;
1627 break;
1628 }
1629 if (pr->ticket != ruleset->rules[rs_num].active.ticket) {
1630 error = EBUSY;
1631 break;
1632 }
1633 rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
1634 while ((rule != NULL) && (rule->nr != pr->nr))
1635 rule = TAILQ_NEXT(rule, entries);
1636 if (rule == NULL) {
1637 error = EBUSY;
1638 break;
1639 }
1640 bcopy(rule, &pr->rule, sizeof(struct pf_rule));
1641 if (pf_anchor_copyout(ruleset, rule, pr)) {
1642 error = EBUSY;
1643 break;
1644 }
1645 pfi_dynaddr_copyout(&pr->rule.src.addr);
1646 pfi_dynaddr_copyout(&pr->rule.dst.addr);
1647 pf_tbladdr_copyout(&pr->rule.src.addr);
1648 pf_tbladdr_copyout(&pr->rule.dst.addr);
1649 pf_rtlabel_copyout(&pr->rule.src.addr);
1650 pf_rtlabel_copyout(&pr->rule.dst.addr);
1651 for (i = 0; i < PF_SKIP_COUNT; ++i)
1652 if (rule->skip[i].ptr == NULL)
1653 pr->rule.skip[i].nr = -1;
1654 else
1655 pr->rule.skip[i].nr =
1656 rule->skip[i].ptr->nr;
1657 break;
1658 }
1659
1660 case DIOCCHANGERULE: {
1661 struct pfioc_rule *pcr = (struct pfioc_rule *)addr;
1662 struct pf_ruleset *ruleset;
1663 struct pf_rule *oldrule = NULL, *newrule = NULL;
1664 u_int32_t nr = 0;
1665 int rs_num;
1666
1667 if (!(pcr->action == PF_CHANGE_REMOVE ||
1668 pcr->action == PF_CHANGE_GET_TICKET) &&
1669 pcr->pool_ticket != ticket_pabuf) {
1670 error = EBUSY;
1671 break;
1672 }
1673
1674 if (pcr->action < PF_CHANGE_ADD_HEAD ||
1675 pcr->action > PF_CHANGE_GET_TICKET) {
1676 error = EINVAL;
1677 break;
1678 }
1679 ruleset = pf_find_ruleset(pcr->anchor);
1680 if (ruleset == NULL) {
1681 error = EINVAL;
1682 break;
1683 }
1684 rs_num = pf_get_ruleset_number(pcr->rule.action);
1685 if (rs_num >= PF_RULESET_MAX) {
1686 error = EINVAL;
1687 break;
1688 }
1689
1690 if (pcr->action == PF_CHANGE_GET_TICKET) {
1691 pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
1692 break;
1693 } else {
1694 if (pcr->ticket !=
1695 ruleset->rules[rs_num].active.ticket) {
1696 error = EINVAL;
1697 break;
1698 }
1699 if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
1700 error = EINVAL;
1701 break;
1702 }
1703 }
1704
1705 if (pcr->action != PF_CHANGE_REMOVE) {
1706 newrule = pool_get(&pf_rule_pl, PR_NOWAIT);
1707 if (newrule == NULL) {
1708 error = ENOMEM;
1709 break;
1710 }
1711 bcopy(&pcr->rule, newrule, sizeof(struct pf_rule));
1712 TAILQ_INIT(&newrule->rpool.list);
1713 /* initialize refcounting */
1714 newrule->states = 0;
1715 newrule->entries.tqe_prev = NULL;
1716#ifndef INET
1717 if (newrule->af == AF_INET) {
1718 pool_put(&pf_rule_pl, newrule);
1719 error = EAFNOSUPPORT;
1720 break;
1721 }
1722#endif /* INET */
1723#ifndef INET6
1724 if (newrule->af == AF_INET6) {
1725 pool_put(&pf_rule_pl, newrule);
1726 error = EAFNOSUPPORT;
1727 break;
1728 }
1729#endif /* INET6 */
1730 if (newrule->ifname[0]) {
1731 newrule->kif = pfi_attach_rule(newrule->ifname);
1732 if (newrule->kif == NULL) {
1733 pool_put(&pf_rule_pl, newrule);
1734 error = EINVAL;
1735 break;
1736 }
1737 } else
1738 newrule->kif = NULL;
1739
1740#ifdef ALTQ
1741 /* set queue IDs */
1742 if (newrule->qname[0] != 0) {
1743 if ((newrule->qid =
1744 pf_qname2qid(newrule->qname)) == 0)
1745 error = EBUSY;
1746 else if (newrule->pqname[0] != 0) {
1747 if ((newrule->pqid =
1748 pf_qname2qid(newrule->pqname)) == 0)
1749 error = EBUSY;
1750 } else
1751 newrule->pqid = newrule->qid;
1752 }
1753#endif /* ALTQ */
1754 if (newrule->tagname[0])
1755 if ((newrule->tag =
1756 pf_tagname2tag(newrule->tagname)) == 0)
1757 error = EBUSY;
1758 if (newrule->match_tagname[0])
1759 if ((newrule->match_tag = pf_tagname2tag(
1760 newrule->match_tagname)) == 0)
1761 error = EBUSY;
1762 if (newrule->rt && !newrule->direction)
1763 error = EINVAL;
1764 if (pf_rtlabel_add(&newrule->src.addr) ||
1765 pf_rtlabel_add(&newrule->dst.addr))
1766 error = EBUSY;
1767 if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af))
1768 error = EINVAL;
1769 if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af))
1770 error = EINVAL;
1771 if (pf_tbladdr_setup(ruleset, &newrule->src.addr))
1772 error = EINVAL;
1773 if (pf_tbladdr_setup(ruleset, &newrule->dst.addr))
1774 error = EINVAL;
1775 if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call))
1776 error = EINVAL;
1777
1778 if (newrule->overload_tblname[0]) {
1779 if ((newrule->overload_tbl = pfr_attach_table(
1780 ruleset, newrule->overload_tblname)) ==
1781 NULL)
1782 error = EINVAL;
1783 else
1784 newrule->overload_tbl->pfrkt_flags |=
1785 PFR_TFLAG_ACTIVE;
1786 }
1787
1788 pf_mv_pool(&pf_pabuf, &newrule->rpool.list);
1789 if (((((newrule->action == PF_NAT) ||
1790 (newrule->action == PF_RDR) ||
1791 (newrule->action == PF_BINAT) ||
1792 (newrule->rt > PF_FASTROUTE)) &&
1793 !pcr->anchor[0])) &&
1794 (TAILQ_FIRST(&newrule->rpool.list) == NULL))
1795 error = EINVAL;
1796
1797 if (error) {
1798 pf_rm_rule(NULL, newrule);
1799 break;
1800 }
1801 newrule->rpool.cur = TAILQ_FIRST(&newrule->rpool.list);
1802 newrule->evaluations = newrule->packets = 0;
1803 newrule->bytes = 0;
1804 }
1805 pf_empty_pool(&pf_pabuf);
1806
1807 if (pcr->action == PF_CHANGE_ADD_HEAD)
1808 oldrule = TAILQ_FIRST(
1809 ruleset->rules[rs_num].active.ptr);
1810 else if (pcr->action == PF_CHANGE_ADD_TAIL)
1811 oldrule = TAILQ_LAST(
1812 ruleset->rules[rs_num].active.ptr, pf_rulequeue);
1813 else {
1814 oldrule = TAILQ_FIRST(
1815 ruleset->rules[rs_num].active.ptr);
1816 while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
1817 oldrule = TAILQ_NEXT(oldrule, entries);
1818 if (oldrule == NULL) {
1819 if (newrule != NULL)
1820 pf_rm_rule(NULL, newrule);
1821 error = EINVAL;
1822 break;
1823 }
1824 }
1825
1826 if (pcr->action == PF_CHANGE_REMOVE)
1827 pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule);
1828 else {
1829 if (oldrule == NULL)
1830 TAILQ_INSERT_TAIL(
1831 ruleset->rules[rs_num].active.ptr,
1832 newrule, entries);
1833 else if (pcr->action == PF_CHANGE_ADD_HEAD ||
1834 pcr->action == PF_CHANGE_ADD_BEFORE)
1835 TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
1836 else
1837 TAILQ_INSERT_AFTER(
1838 ruleset->rules[rs_num].active.ptr,
1839 oldrule, newrule, entries);
1840 }
1841
1842 nr = 0;
1843 TAILQ_FOREACH(oldrule,
1844 ruleset->rules[rs_num].active.ptr, entries)
1845 oldrule->nr = nr++;
1846
1847 ruleset->rules[rs_num].active.ticket++;
1848
1849 pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
1850 pf_remove_if_empty_ruleset(ruleset);
1851
1852 break;
1853 }
1854
1855 case DIOCCLRSTATES: {
1856 struct pf_state *state;
1857 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1858 int killed = 0;
1859
1860 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1861 if (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1862 state->u.s.kif->pfik_name)) {
1863 state->timeout = PFTM_PURGE;
1864#if NPFSYNC
1865 /* don't send out individual delete messages */
1866 state->sync_flags = PFSTATE_NOSYNC;
1867#endif
1868 killed++;
1869 }
1870 }
1871 pf_purge_expired_states();
1872 pf_status.states = 0;
1873 psk->psk_af = killed;
1874#if NPFSYNC
1875 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
1876#endif
1877 break;
1878 }
1879
1880 case DIOCKILLSTATES: {
1881 struct pf_state *state;
1882 struct pfioc_state_kill *psk = (struct pfioc_state_kill *)addr;
1883 int killed = 0;
1884
1885 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1886 if ((!psk->psk_af || state->af == psk->psk_af)
1887 && (!psk->psk_proto || psk->psk_proto ==
1888 state->proto) &&
1889 PF_MATCHA(psk->psk_src.neg,
1890 &psk->psk_src.addr.v.a.addr,
1891 &psk->psk_src.addr.v.a.mask,
1892 &state->lan.addr, state->af) &&
1893 PF_MATCHA(psk->psk_dst.neg,
1894 &psk->psk_dst.addr.v.a.addr,
1895 &psk->psk_dst.addr.v.a.mask,
1896 &state->ext.addr, state->af) &&
1897 (psk->psk_src.port_op == 0 ||
1898 pf_match_port(psk->psk_src.port_op,
1899 psk->psk_src.port[0], psk->psk_src.port[1],
1900 state->lan.port)) &&
1901 (psk->psk_dst.port_op == 0 ||
1902 pf_match_port(psk->psk_dst.port_op,
1903 psk->psk_dst.port[0], psk->psk_dst.port[1],
1904 state->ext.port)) &&
1905 (!psk->psk_ifname[0] || !strcmp(psk->psk_ifname,
1906 state->u.s.kif->pfik_name))) {
1907 state->timeout = PFTM_PURGE;
1908 killed++;
1909 }
1910 }
1911 pf_purge_expired_states();
1912 psk->psk_af = killed;
1913 break;
1914 }
1915
1916 case DIOCADDSTATE: {
1917 struct pfioc_state *ps = (struct pfioc_state *)addr;
1918 struct pf_state *state;
1919 struct pfi_kif *kif;
1920
1921 if (ps->state.timeout >= PFTM_MAX &&
1922 ps->state.timeout != PFTM_UNTIL_PACKET) {
1923 error = EINVAL;
1924 break;
1925 }
1926 state = pool_get(&pf_state_pl, PR_NOWAIT);
1927 if (state == NULL) {
1928 error = ENOMEM;
1929 break;
1930 }
1931 kif = pfi_lookup_create(ps->state.u.ifname);
1932 if (kif == NULL) {
1933 pool_put(&pf_state_pl, state);
1934 error = ENOENT;
1935 break;
1936 }
1937 bcopy(&ps->state, state, sizeof(struct pf_state));
1938 bzero(&state->u, sizeof(state->u));
1939 state->rule.ptr = &pf_default_rule;
1940 state->nat_rule.ptr = NULL;
1941 state->anchor.ptr = NULL;
1942 state->rt_kif = NULL;
1943 state->creation = time_second;
1944 state->pfsync_time = 0;
1945 state->packets[0] = state->packets[1] = 0;
1946 state->bytes[0] = state->bytes[1] = 0;
1947
1948 if (pf_insert_state(kif, state)) {
1949 pfi_maybe_destroy(kif);
1950 pool_put(&pf_state_pl, state);
1951 error = ENOMEM;
1952 }
1953 break;
1954 }
1955
1956 case DIOCGETSTATE: {
1957 struct pfioc_state *ps = (struct pfioc_state *)addr;
1958 struct pf_state *state;
1959 u_int32_t nr;
1960
1961 nr = 0;
1962 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
1963 if (nr >= ps->nr)
1964 break;
1965 nr++;
1966 }
1967 if (state == NULL) {
1968 error = EBUSY;
1969 break;
1970 }
1971 bcopy(state, &ps->state, sizeof(struct pf_state));
1972 ps->state.rule.nr = state->rule.ptr->nr;
1973 ps->state.nat_rule.nr = (state->nat_rule.ptr == NULL) ?
1974 -1 : state->nat_rule.ptr->nr;
1975 ps->state.anchor.nr = (state->anchor.ptr == NULL) ?
1976 -1 : state->anchor.ptr->nr;
1977 ps->state.expire = pf_state_expires(state);
1978 if (ps->state.expire > time_second)
1979 ps->state.expire -= time_second;
1980 else
1981 ps->state.expire = 0;
1982 break;
1983 }
1984
1985 case DIOCGETSTATES: {
1986 struct pfioc_states *ps = (struct pfioc_states *)addr;
1987 struct pf_state *state;
1988 struct pf_state *p, pstore;
1989 struct pfi_kif *kif;
1990 u_int32_t nr = 0;
1991 int space = ps->ps_len;
1992
1993 if (space == 0) {
1994 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
1995 nr += kif->pfik_states;
1996 ps->ps_len = sizeof(struct pf_state) * nr;
1997 break;
1998 }
1999
2000 p = ps->ps_states;
2001 TAILQ_FOREACH(kif, &pfi_statehead, pfik_w_states)
2002 RB_FOREACH(state, pf_state_tree_ext_gwy,
2003 &kif->pfik_ext_gwy) {
2004 int secs = time_second;
2005
2006 if ((nr+1) * sizeof(*p) > (unsigned)ps->ps_len)
2007 break;
2008
2009 bcopy(state, &pstore, sizeof(pstore));
2010 strlcpy(pstore.u.ifname, kif->pfik_name,
2011 sizeof(pstore.u.ifname));
2012 pstore.rule.nr = state->rule.ptr->nr;
2013 pstore.nat_rule.nr = (state->nat_rule.ptr ==
2014 NULL) ? -1 : state->nat_rule.ptr->nr;
2015 pstore.anchor.nr = (state->anchor.ptr ==
2016 NULL) ? -1 : state->anchor.ptr->nr;
2017 pstore.creation = secs - pstore.creation;
2018 pstore.expire = pf_state_expires(state);
2019 if (pstore.expire > secs)
2020 pstore.expire -= secs;
2021 else
2022 pstore.expire = 0;
2023#ifdef __FreeBSD__
2024 PF_COPYOUT(&pstore, p, sizeof(*p), error);
2025#else
2026 error = copyout(&pstore, p, sizeof(*p));
2027#endif
2028 if (error)
2029 goto fail;
2030 p++;
2031 nr++;
2032 }
2033 ps->ps_len = sizeof(struct pf_state) * nr;
2034 break;
2035 }
2036
2037 case DIOCGETSTATUS: {
2038 struct pf_status *s = (struct pf_status *)addr;
2039 bcopy(&pf_status, s, sizeof(struct pf_status));
2040 pfi_fill_oldstatus(s);
2041 break;
2042 }
2043
2044 case DIOCSETSTATUSIF: {
2045 struct pfioc_if *pi = (struct pfioc_if *)addr;
2046
2047 if (pi->ifname[0] == 0) {
2048 bzero(pf_status.ifname, IFNAMSIZ);
2049 break;
2050 }
2051 if (ifunit(pi->ifname) == NULL) {
2052 error = EINVAL;
2053 break;
2054 }
2055 strlcpy(pf_status.ifname, pi->ifname, IFNAMSIZ);
2056 break;
2057 }
2058
2059 case DIOCCLRSTATUS: {
2060 bzero(pf_status.counters, sizeof(pf_status.counters));
2061 bzero(pf_status.fcounters, sizeof(pf_status.fcounters));
2062 bzero(pf_status.scounters, sizeof(pf_status.scounters));
2063 if (*pf_status.ifname)
2064 pfi_clr_istats(pf_status.ifname, NULL,
2065 PFI_FLAG_INSTANCE);
2066 break;
2067 }
2068
2069 case DIOCNATLOOK: {
2070 struct pfioc_natlook *pnl = (struct pfioc_natlook *)addr;
2071 struct pf_state *state;
2072 struct pf_state key;
2073 int m = 0, direction = pnl->direction;
2074
2075 key.af = pnl->af;
2076 key.proto = pnl->proto;
2077
2078 if (!pnl->proto ||
2079 PF_AZERO(&pnl->saddr, pnl->af) ||
2080 PF_AZERO(&pnl->daddr, pnl->af) ||
2081 !pnl->dport || !pnl->sport)
2082 error = EINVAL;
2083 else {
2084			/*
2085			 * Userland gives us the source and dest of the
2086			 * connection; reverse the lookup so we ask what
2087			 * happens with the return traffic, enabling us to
2088			 * find it in the state tree.
2089			 */
2090 if (direction == PF_IN) {
2091 PF_ACPY(&key.ext.addr, &pnl->daddr, pnl->af);
2092 key.ext.port = pnl->dport;
2093 PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af);
2094 key.gwy.port = pnl->sport;
2095 state = pf_find_state_all(&key, PF_EXT_GWY, &m);
2096 } else {
2097 PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af);
2098 key.lan.port = pnl->dport;
2099 PF_ACPY(&key.ext.addr, &pnl->saddr, pnl->af);
2100 key.ext.port = pnl->sport;
2101 state = pf_find_state_all(&key, PF_LAN_EXT, &m);
2102 }
2103 if (m > 1)
2104 error = E2BIG; /* more than one state */
2105 else if (state != NULL) {
2106 if (direction == PF_IN) {
2107 PF_ACPY(&pnl->rsaddr, &state->lan.addr,
2108 state->af);
2109 pnl->rsport = state->lan.port;
2110 PF_ACPY(&pnl->rdaddr, &pnl->daddr,
2111 pnl->af);
2112 pnl->rdport = pnl->dport;
2113 } else {
2114 PF_ACPY(&pnl->rdaddr, &state->gwy.addr,
2115 state->af);
2116 pnl->rdport = state->gwy.port;
2117 PF_ACPY(&pnl->rsaddr, &pnl->saddr,
2118 pnl->af);
2119 pnl->rsport = pnl->sport;
2120 }
2121 } else
2122 error = ENOENT;
2123 }
2124 break;
2125 }
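	/*
	 * Illustrative userland sketch of the DIOCNATLOOK case above, in the
	 * style of a transparent proxy recovering the original destination of
	 * a redirected TCP connection (a hedged example, not part of the
	 * original source; client_sin, local_sin and dev are hypothetical):
	 *
	 *	struct pfioc_natlook nl;
	 *
	 *	bzero(&nl, sizeof(nl));
	 *	nl.af = AF_INET;
	 *	nl.proto = IPPROTO_TCP;
	 *	nl.direction = PF_OUT;
	 *	nl.saddr.v4 = client_sin.sin_addr;	(connection as seen locally)
	 *	nl.sport = client_sin.sin_port;
	 *	nl.daddr.v4 = local_sin.sin_addr;
	 *	nl.dport = local_sin.sin_port;
	 *	if (ioctl(dev, DIOCNATLOOK, &nl) == 0)
	 *		the pre-rdr destination is in nl.rdaddr.v4 / nl.rdport
	 *
	 * Ports are passed in network byte order, as elsewhere in the pf
	 * ioctl interface.
	 */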
2126
2127 case DIOCSETTIMEOUT: {
2128 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2129 int old;
2130
2131 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX ||
2132 pt->seconds < 0) {
2133 error = EINVAL;
2134 goto fail;
2135 }
2136 old = pf_default_rule.timeout[pt->timeout];
2137 pf_default_rule.timeout[pt->timeout] = pt->seconds;
2138 pt->seconds = old;
2139 break;
2140 }
2141
2142 case DIOCGETTIMEOUT: {
2143 struct pfioc_tm *pt = (struct pfioc_tm *)addr;
2144
2145 if (pt->timeout < 0 || pt->timeout >= PFTM_MAX) {
2146 error = EINVAL;
2147 goto fail;
2148 }
2149 pt->seconds = pf_default_rule.timeout[pt->timeout];
2150 break;
2151 }
2152
2153 case DIOCGETLIMIT: {
2154 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2155
2156 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) {
2157 error = EINVAL;
2158 goto fail;
2159 }
2160 pl->limit = pf_pool_limits[pl->index].limit;
2161 break;
2162 }
2163
2164 case DIOCSETLIMIT: {
2165 struct pfioc_limit *pl = (struct pfioc_limit *)addr;
2166 int old_limit;
2167
2168 if (pl->index < 0 || pl->index >= PF_LIMIT_MAX ||
2169 pf_pool_limits[pl->index].pp == NULL) {
2170 error = EINVAL;
2171 goto fail;
2172 }
2173#ifdef __FreeBSD__
2174 uma_zone_set_max(pf_pool_limits[pl->index].pp, pl->limit);
2175#else
2176 if (pool_sethardlimit(pf_pool_limits[pl->index].pp,
2177 pl->limit, NULL, 0) != 0) {
2178 error = EBUSY;
2179 goto fail;
2180 }
2181#endif
2182 old_limit = pf_pool_limits[pl->index].limit;
2183 pf_pool_limits[pl->index].limit = pl->limit;
2184 pl->limit = old_limit;
2185 break;
2186 }
2187
2188 case DIOCSETDEBUG: {
2189 u_int32_t *level = (u_int32_t *)addr;
2190
2191 pf_status.debug = *level;
2192 break;
2193 }
2194
2195 case DIOCCLRRULECTRS: {
2196 struct pf_ruleset *ruleset = &pf_main_ruleset;
2197 struct pf_rule *rule;
2198
2199 TAILQ_FOREACH(rule,
2200 ruleset->rules[PF_RULESET_FILTER].active.ptr, entries)
2201 rule->evaluations = rule->packets =
2202 rule->bytes = 0;
2203 break;
2204 }
2205
2206#ifdef __FreeBSD__
2207 case DIOCGIFSPEED: {
2208 struct pf_ifspeed *psp = (struct pf_ifspeed *)addr;
2209 struct pf_ifspeed ps;
2210 struct ifnet *ifp;
2211
2212 if (psp->ifname[0] != 0) {
2213 /* Can we completely trust user-land? */
2214 strlcpy(ps.ifname, psp->ifname, IFNAMSIZ);
2215 ifp = ifunit(ps.ifname);
2216 if (ifp != NULL)
2217 psp->baudrate = ifp->if_baudrate;
2218 else
2219 error = EINVAL;
2220 } else
2221 error = EINVAL;
2222 break;
2223 }
2224#endif /* __FreeBSD__ */
2225
2226#ifdef ALTQ
2227 case DIOCSTARTALTQ: {
2228 struct pf_altq *altq;
2229
2230 /* enable all altq interfaces on active list */
2231 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2232 if (altq->qname[0] == 0) {
2233 error = pf_enable_altq(altq);
2234 if (error != 0)
2235 break;
2236 }
2237 }
2238 if (error == 0)
2239 pf_altq_running = 1;
2240 DPFPRINTF(PF_DEBUG_MISC, ("altq: started\n"));
2241 break;
2242 }
2243
2244 case DIOCSTOPALTQ: {
2245 struct pf_altq *altq;
2246
2247 /* disable all altq interfaces on active list */
2248 TAILQ_FOREACH(altq, pf_altqs_active, entries) {
2249 if (altq->qname[0] == 0) {
2250 error = pf_disable_altq(altq);
2251 if (error != 0)
2252 break;
2253 }
2254 }
2255 if (error == 0)
2256 pf_altq_running = 0;
2257 DPFPRINTF(PF_DEBUG_MISC, ("altq: stopped\n"));
2258 break;
2259 }
2260
2261 case DIOCADDALTQ: {
2262 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2263 struct pf_altq *altq, *a;
2264
2265 if (pa->ticket != ticket_altqs_inactive) {
2266 error = EBUSY;
2267 break;
2268 }
2269 altq = pool_get(&pf_altq_pl, PR_NOWAIT);
2270 if (altq == NULL) {
2271 error = ENOMEM;
2272 break;
2273 }
2274 bcopy(&pa->altq, altq, sizeof(struct pf_altq));
2275
2276 /*
2277 * if this is for a queue, find the discipline and
2278 * copy the necessary fields
2279 */
2280 if (altq->qname[0] != 0) {
2281 if ((altq->qid = pf_qname2qid(altq->qname)) == 0) {
2282 error = EBUSY;
2283 pool_put(&pf_altq_pl, altq);
2284 break;
2285 }
2286 TAILQ_FOREACH(a, pf_altqs_inactive, entries) {
2287 if (strncmp(a->ifname, altq->ifname,
2288 IFNAMSIZ) == 0 && a->qname[0] == 0) {
2289 altq->altq_disc = a->altq_disc;
2290 break;
2291 }
2292 }
2293 }
2294
2295#ifdef __FreeBSD__
2296 PF_UNLOCK();
2297#endif
2298 error = altq_add(altq);
2299#ifdef __FreeBSD__
2300 PF_LOCK();
2301#endif
2302 if (error) {
2303 pool_put(&pf_altq_pl, altq);
2304 break;
2305 }
2306
2307 TAILQ_INSERT_TAIL(pf_altqs_inactive, altq, entries);
2308 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2309 break;
2310 }
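	/*
	 * For context (an illustrative sketch, not part of the original
	 * source; interface and queue names are made up): a pf.conf fragment
	 * such as
	 *
	 *	altq on em0 cbq bandwidth 10Mb queue { std, ssh }
	 *	queue std bandwidth 9Mb cbq(default)
	 *	queue ssh bandwidth 1Mb
	 *
	 * produces one DIOCADDALTQ call with an empty qname for the em0
	 * discipline itself and one call per queue; the per-queue entries
	 * locate the em0 entry on the inactive list above and inherit its
	 * altq_disc pointer.
	 */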
2311
2312 case DIOCGETALTQS: {
2313 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2314 struct pf_altq *altq;
2315
2316 pa->nr = 0;
2317 TAILQ_FOREACH(altq, pf_altqs_active, entries)
2318 pa->nr++;
2319 pa->ticket = ticket_altqs_active;
2320 break;
2321 }
2322
2323 case DIOCGETALTQ: {
2324 struct pfioc_altq *pa = (struct pfioc_altq *)addr;
2325 struct pf_altq *altq;
2326 u_int32_t nr;
2327
2328 if (pa->ticket != ticket_altqs_active) {
2329 error = EBUSY;
2330 break;
2331 }
2332 nr = 0;
2333 altq = TAILQ_FIRST(pf_altqs_active);
2334 while ((altq != NULL) && (nr < pa->nr)) {
2335 altq = TAILQ_NEXT(altq, entries);
2336 nr++;
2337 }
2338 if (altq == NULL) {
2339 error = EBUSY;
2340 break;
2341 }
2342 bcopy(altq, &pa->altq, sizeof(struct pf_altq));
2343 break;
2344 }
2345
2346 case DIOCCHANGEALTQ:
2347 /* CHANGEALTQ not supported yet! */
2348 error = ENODEV;
2349 break;
2350
2351 case DIOCGETQSTATS: {
2352 struct pfioc_qstats *pq = (struct pfioc_qstats *)addr;
2353 struct pf_altq *altq;
2354 u_int32_t nr;
2355 int nbytes;
2356
2357 if (pq->ticket != ticket_altqs_active) {
2358 error = EBUSY;
2359 break;
2360 }
2361 nbytes = pq->nbytes;
2362 nr = 0;
2363 altq = TAILQ_FIRST(pf_altqs_active);
2364 while ((altq != NULL) && (nr < pq->nr)) {
2365 altq = TAILQ_NEXT(altq, entries);
2366 nr++;
2367 }
2368 if (altq == NULL) {
2369 error = EBUSY;
2370 break;
2371 }
2372#ifdef __FreeBSD__
2373 PF_UNLOCK();
2374#endif
2375 error = altq_getqstats(altq, pq->buf, &nbytes);
2376#ifdef __FreeBSD__
2377 PF_LOCK();
2378#endif
2379 if (error == 0) {
2380 pq->scheduler = altq->scheduler;
2381 pq->nbytes = nbytes;
2382 }
2383 break;
2384 }
2385#endif /* ALTQ */
2386
2387 case DIOCBEGINADDRS: {
2388 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2389
2390 pf_empty_pool(&pf_pabuf);
2391 pp->ticket = ++ticket_pabuf;
2392 break;
2393 }
2394
2395 case DIOCADDADDR: {
2396 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2397
2398#ifndef INET
2399 if (pp->af == AF_INET) {
2400 error = EAFNOSUPPORT;
2401 break;
2402 }
2403#endif /* INET */
2404#ifndef INET6
2405 if (pp->af == AF_INET6) {
2406 error = EAFNOSUPPORT;
2407 break;
2408 }
2409#endif /* INET6 */
2410 if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
2411 pp->addr.addr.type != PF_ADDR_DYNIFTL &&
2412 pp->addr.addr.type != PF_ADDR_TABLE) {
2413 error = EINVAL;
2414 break;
2415 }
2416 pa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2417 if (pa == NULL) {
2418 error = ENOMEM;
2419 break;
2420 }
2421 bcopy(&pp->addr, pa, sizeof(struct pf_pooladdr));
2422 if (pa->ifname[0]) {
2423 pa->kif = pfi_attach_rule(pa->ifname);
2424 if (pa->kif == NULL) {
2425 pool_put(&pf_pooladdr_pl, pa);
2426 error = EINVAL;
2427 break;
2428 }
2429 }
2430 if (pfi_dynaddr_setup(&pa->addr, pp->af)) {
2431 pfi_dynaddr_remove(&pa->addr);
2432 pfi_detach_rule(pa->kif);
2433 pool_put(&pf_pooladdr_pl, pa);
2434 error = EINVAL;
2435 break;
2436 }
2437 TAILQ_INSERT_TAIL(&pf_pabuf, pa, entries);
2438 break;
2439 }
2440
2441 case DIOCGETADDRS: {
2442 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2443
2444 pp->nr = 0;
2445 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2446 pp->r_num, 0, 1, 0);
2447 if (pool == NULL) {
2448 error = EBUSY;
2449 break;
2450 }
2451 TAILQ_FOREACH(pa, &pool->list, entries)
2452 pp->nr++;
2453 break;
2454 }
2455
2456 case DIOCGETADDR: {
2457 struct pfioc_pooladdr *pp = (struct pfioc_pooladdr *)addr;
2458 u_int32_t nr = 0;
2459
2460 pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action,
2461 pp->r_num, 0, 1, 1);
2462 if (pool == NULL) {
2463 error = EBUSY;
2464 break;
2465 }
2466 pa = TAILQ_FIRST(&pool->list);
2467 while ((pa != NULL) && (nr < pp->nr)) {
2468 pa = TAILQ_NEXT(pa, entries);
2469 nr++;
2470 }
2471 if (pa == NULL) {
2472 error = EBUSY;
2473 break;
2474 }
2475 bcopy(pa, &pp->addr, sizeof(struct pf_pooladdr));
2476 pfi_dynaddr_copyout(&pp->addr.addr);
2477 pf_tbladdr_copyout(&pp->addr.addr);
2478 pf_rtlabel_copyout(&pp->addr.addr);
2479 break;
2480 }
2481
2482 case DIOCCHANGEADDR: {
2483 struct pfioc_pooladdr *pca = (struct pfioc_pooladdr *)addr;
2484 struct pf_pooladdr *oldpa = NULL, *newpa = NULL;
2485 struct pf_ruleset *ruleset;
2486
2487 if (pca->action < PF_CHANGE_ADD_HEAD ||
2488 pca->action > PF_CHANGE_REMOVE) {
2489 error = EINVAL;
2490 break;
2491 }
2492 if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
2493 pca->addr.addr.type != PF_ADDR_DYNIFTL &&
2494 pca->addr.addr.type != PF_ADDR_TABLE) {
2495 error = EINVAL;
2496 break;
2497 }
2498
2499 ruleset = pf_find_ruleset(pca->anchor);
2500 if (ruleset == NULL) {
2501 error = EBUSY;
2502 break;
2503 }
2504 pool = pf_get_pool(pca->anchor, pca->ticket, pca->r_action,
2505 pca->r_num, pca->r_last, 1, 1);
2506 if (pool == NULL) {
2507 error = EBUSY;
2508 break;
2509 }
2510 if (pca->action != PF_CHANGE_REMOVE) {
2511 newpa = pool_get(&pf_pooladdr_pl, PR_NOWAIT);
2512 if (newpa == NULL) {
2513 error = ENOMEM;
2514 break;
2515 }
2516 bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
2517#ifndef INET
2518 if (pca->af == AF_INET) {
2519 pool_put(&pf_pooladdr_pl, newpa);
2520 error = EAFNOSUPPORT;
2521 break;
2522 }
2523#endif /* INET */
2524#ifndef INET6
2525 if (pca->af == AF_INET6) {
2526 pool_put(&pf_pooladdr_pl, newpa);
2527 error = EAFNOSUPPORT;
2528 break;
2529 }
2530#endif /* INET6 */
2531 if (newpa->ifname[0]) {
2532 newpa->kif = pfi_attach_rule(newpa->ifname);
2533 if (newpa->kif == NULL) {
2534 pool_put(&pf_pooladdr_pl, newpa);
2535 error = EINVAL;
2536 break;
2537 }
2538 } else
2539 newpa->kif = NULL;
2540 if (pfi_dynaddr_setup(&newpa->addr, pca->af) ||
2541 pf_tbladdr_setup(ruleset, &newpa->addr)) {
2542 pfi_dynaddr_remove(&newpa->addr);
2543 pfi_detach_rule(newpa->kif);
2544 pool_put(&pf_pooladdr_pl, newpa);
2545 error = EINVAL;
2546 break;
2547 }
2548 }
2549
2550 if (pca->action == PF_CHANGE_ADD_HEAD)
2551 oldpa = TAILQ_FIRST(&pool->list);
2552 else if (pca->action == PF_CHANGE_ADD_TAIL)
2553 oldpa = TAILQ_LAST(&pool->list, pf_palist);
2554 else {
2555 int i = 0;
2556
2557 oldpa = TAILQ_FIRST(&pool->list);
2558 while ((oldpa != NULL) && (i < pca->nr)) {
2559 oldpa = TAILQ_NEXT(oldpa, entries);
2560 i++;
2561 }
2562 if (oldpa == NULL) {
2563 error = EINVAL;
2564 break;
2565 }
2566 }
2567
2568 if (pca->action == PF_CHANGE_REMOVE) {
2569 TAILQ_REMOVE(&pool->list, oldpa, entries);
2570 pfi_dynaddr_remove(&oldpa->addr);
2571 pf_tbladdr_remove(&oldpa->addr);
2572 pfi_detach_rule(oldpa->kif);
2573 pool_put(&pf_pooladdr_pl, oldpa);
2574 } else {
2575 if (oldpa == NULL)
2576 TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
2577 else if (pca->action == PF_CHANGE_ADD_HEAD ||
2578 pca->action == PF_CHANGE_ADD_BEFORE)
2579 TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
2580 else
2581 TAILQ_INSERT_AFTER(&pool->list, oldpa,
2582 newpa, entries);
2583 }
2584
2585 pool->cur = TAILQ_FIRST(&pool->list);
2586 PF_ACPY(&pool->counter, &pool->cur->addr.v.a.addr,
2587 pca->af);
2588 break;
2589 }
2590
2591 case DIOCGETRULESETS: {
2592 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2593 struct pf_ruleset *ruleset;
2594 struct pf_anchor *anchor;
2595
2596 pr->path[sizeof(pr->path) - 1] = 0;
2597 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2598 error = EINVAL;
2599 break;
2600 }
2601 pr->nr = 0;
2602 if (ruleset->anchor == NULL) {
2603 /* XXX kludge for pf_main_ruleset */
2604 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2605 if (anchor->parent == NULL)
2606 pr->nr++;
2607 } else {
2608 RB_FOREACH(anchor, pf_anchor_node,
2609 &ruleset->anchor->children)
2610 pr->nr++;
2611 }
2612 break;
2613 }
2614
2615 case DIOCGETRULESET: {
2616 struct pfioc_ruleset *pr = (struct pfioc_ruleset *)addr;
2617 struct pf_ruleset *ruleset;
2618 struct pf_anchor *anchor;
2619 u_int32_t nr = 0;
2620
2621 pr->path[sizeof(pr->path) - 1] = 0;
2622 if ((ruleset = pf_find_ruleset(pr->path)) == NULL) {
2623 error = EINVAL;
2624 break;
2625 }
2626 pr->name[0] = 0;
2627 if (ruleset->anchor == NULL) {
2628 /* XXX kludge for pf_main_ruleset */
2629 RB_FOREACH(anchor, pf_anchor_global, &pf_anchors)
2630 if (anchor->parent == NULL && nr++ == pr->nr) {
2631 strlcpy(pr->name, anchor->name,
2632 sizeof(pr->name));
2633 break;
2634 }
2635 } else {
2636 RB_FOREACH(anchor, pf_anchor_node,
2637 &ruleset->anchor->children)
2638 if (nr++ == pr->nr) {
2639 strlcpy(pr->name, anchor->name,
2640 sizeof(pr->name));
2641 break;
2642 }
2643 }
2644 if (!pr->name[0])
2645 error = EBUSY;
2646 break;
2647 }
2648
2649 case DIOCRCLRTABLES: {
2650 struct pfioc_table *io = (struct pfioc_table *)addr;
2651
2652 if (io->pfrio_esize != 0) {
2653 error = ENODEV;
2654 break;
2655 }
2656 error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
2657 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2658 break;
2659 }
2660
2661 case DIOCRADDTABLES: {
2662 struct pfioc_table *io = (struct pfioc_table *)addr;
2663
2664 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2665 error = ENODEV;
2666 break;
2667 }
2668 error = pfr_add_tables(io->pfrio_buffer, io->pfrio_size,
2669 &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2670 break;
2671 }
2672
2673 case DIOCRDELTABLES: {
2674 struct pfioc_table *io = (struct pfioc_table *)addr;
2675
2676 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2677 error = ENODEV;
2678 break;
2679 }
2680 error = pfr_del_tables(io->pfrio_buffer, io->pfrio_size,
2681 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2682 break;
2683 }
2684
2685 case DIOCRGETTABLES: {
2686 struct pfioc_table *io = (struct pfioc_table *)addr;
2687
2688 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2689 error = ENODEV;
2690 break;
2691 }
2692 error = pfr_get_tables(&io->pfrio_table, io->pfrio_buffer,
2693 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2694 break;
2695 }
2696
2697 case DIOCRGETTSTATS: {
2698 struct pfioc_table *io = (struct pfioc_table *)addr;
2699
2700 if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
2701 error = ENODEV;
2702 break;
2703 }
2704 error = pfr_get_tstats(&io->pfrio_table, io->pfrio_buffer,
2705 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2706 break;
2707 }
2708
2709 case DIOCRCLRTSTATS: {
2710 struct pfioc_table *io = (struct pfioc_table *)addr;
2711
2712 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2713 error = ENODEV;
2714 break;
2715 }
2716 error = pfr_clr_tstats(io->pfrio_buffer, io->pfrio_size,
2717 &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2718 break;
2719 }
2720
2721 case DIOCRSETTFLAGS: {
2722 struct pfioc_table *io = (struct pfioc_table *)addr;
2723
2724 if (io->pfrio_esize != sizeof(struct pfr_table)) {
2725 error = ENODEV;
2726 break;
2727 }
2728 error = pfr_set_tflags(io->pfrio_buffer, io->pfrio_size,
2729 io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
2730 &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2731 break;
2732 }
2733
2734 case DIOCRCLRADDRS: {
2735 struct pfioc_table *io = (struct pfioc_table *)addr;
2736
2737 if (io->pfrio_esize != 0) {
2738 error = ENODEV;
2739 break;
2740 }
2741 error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
2742 io->pfrio_flags | PFR_FLAG_USERIOCTL);
2743 break;
2744 }
2745
2746 case DIOCRADDADDRS: {
2747 struct pfioc_table *io = (struct pfioc_table *)addr;
2748
2749 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2750 error = ENODEV;
2751 break;
2752 }
2753 error = pfr_add_addrs(&io->pfrio_table, io->pfrio_buffer,
2754 io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
2755 PFR_FLAG_USERIOCTL);
2756 break;
2757 }
2758
2759 case DIOCRDELADDRS: {
2760 struct pfioc_table *io = (struct pfioc_table *)addr;
2761
2762 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2763 error = ENODEV;
2764 break;
2765 }
2766 error = pfr_del_addrs(&io->pfrio_table, io->pfrio_buffer,
2767 io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
2768 PFR_FLAG_USERIOCTL);
2769 break;
2770 }
2771
2772 case DIOCRSETADDRS: {
2773 struct pfioc_table *io = (struct pfioc_table *)addr;
2774
2775 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2776 error = ENODEV;
2777 break;
2778 }
2779 error = pfr_set_addrs(&io->pfrio_table, io->pfrio_buffer,
2780 io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
2781 &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
2782 PFR_FLAG_USERIOCTL);
2783 break;
2784 }
2785
2786 case DIOCRGETADDRS: {
2787 struct pfioc_table *io = (struct pfioc_table *)addr;
2788
2789 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2790 error = ENODEV;
2791 break;
2792 }
2793 error = pfr_get_addrs(&io->pfrio_table, io->pfrio_buffer,
2794 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2795 break;
2796 }
2797
2798 case DIOCRGETASTATS: {
2799 struct pfioc_table *io = (struct pfioc_table *)addr;
2800
2801 if (io->pfrio_esize != sizeof(struct pfr_astats)) {
2802 error = ENODEV;
2803 break;
2804 }
2805 error = pfr_get_astats(&io->pfrio_table, io->pfrio_buffer,
2806 &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2807 break;
2808 }
2809
2810 case DIOCRCLRASTATS: {
2811 struct pfioc_table *io = (struct pfioc_table *)addr;
2812
2813 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2814 error = ENODEV;
2815 break;
2816 }
2817 error = pfr_clr_astats(&io->pfrio_table, io->pfrio_buffer,
2818 io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
2819 PFR_FLAG_USERIOCTL);
2820 break;
2821 }
2822
2823 case DIOCRTSTADDRS: {
2824 struct pfioc_table *io = (struct pfioc_table *)addr;
2825
2826 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2827 error = ENODEV;
2828 break;
2829 }
2830 error = pfr_tst_addrs(&io->pfrio_table, io->pfrio_buffer,
2831 io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
2832 PFR_FLAG_USERIOCTL);
2833 break;
2834 }
2835
2836 case DIOCRINADEFINE: {
2837 struct pfioc_table *io = (struct pfioc_table *)addr;
2838
2839 if (io->pfrio_esize != sizeof(struct pfr_addr)) {
2840 error = ENODEV;
2841 break;
2842 }
2843 error = pfr_ina_define(&io->pfrio_table, io->pfrio_buffer,
2844 io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
2845 io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
2846 break;
2847 }
2848
2849 case DIOCOSFPADD: {
2850 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2851 error = pf_osfp_add(io);
2852 break;
2853 }
2854
2855 case DIOCOSFPGET: {
2856 struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
2857 error = pf_osfp_get(io);
2858 break;
2859 }
2860
2861 case DIOCXBEGIN: {
2862 struct pfioc_trans *io = (struct pfioc_trans *)
2863 addr;
2864 static struct pfioc_trans_e ioe;
2865 static struct pfr_table table;
2866 int i;
2867
2868 if (io->esize != sizeof(ioe)) {
2869 error = ENODEV;
2870 goto fail;
2871 }
2872 for (i = 0; i < io->size; i++) {
2873#ifdef __FreeBSD__
2874 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2875 if (error) {
2876#else
2877 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2878#endif
2879 error = EFAULT;
2880 goto fail;
2881 }
2882 switch (ioe.rs_num) {
2883#ifdef ALTQ
2884 case PF_RULESET_ALTQ:
2885 if (ioe.anchor[0]) {
2886 error = EINVAL;
2887 goto fail;
2888 }
2889 if ((error = pf_begin_altq(&ioe.ticket)))
2890 goto fail;
2891 break;
2892#endif /* ALTQ */
2893 case PF_RULESET_TABLE:
2894 bzero(&table, sizeof(table));
2895 strlcpy(table.pfrt_anchor, ioe.anchor,
2896 sizeof(table.pfrt_anchor));
2897 if ((error = pfr_ina_begin(&table,
2898 &ioe.ticket, NULL, 0)))
2899 goto fail;
2900 break;
2901 default:
2902 if ((error = pf_begin_rules(&ioe.ticket,
2903 ioe.rs_num, ioe.anchor)))
2904 goto fail;
2905 break;
2906 }
2907#ifdef __FreeBSD__
2908 PF_COPYOUT(&ioe, io->array+i, sizeof(io->array[i]),
2909 error);
2910 if (error) {
2911#else
2912 if (copyout(&ioe, io->array+i, sizeof(io->array[i]))) {
2913#endif
2914 error = EFAULT;
2915 goto fail;
2916 }
2917 }
2918 break;
2919 }
2920
2921 case DIOCXROLLBACK: {
2922 struct pfioc_trans *io = (struct pfioc_trans *)
2923 addr;
2924 static struct pfioc_trans_e ioe;
2925 static struct pfr_table table;
2926 int i;
2927
2928 if (io->esize != sizeof(ioe)) {
2929 error = ENODEV;
2930 goto fail;
2931 }
2932 for (i = 0; i < io->size; i++) {
2933#ifdef __FreeBSD__
2934 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2935 if (error) {
2936#else
2937 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2938#endif
2939 error = EFAULT;
2940 goto fail;
2941 }
2942 switch (ioe.rs_num) {
2943#ifdef ALTQ
2944 case PF_RULESET_ALTQ:
2945 if (ioe.anchor[0]) {
2946 error = EINVAL;
2947 goto fail;
2948 }
2949 if ((error = pf_rollback_altq(ioe.ticket)))
2950 goto fail; /* really bad */
2951 break;
2952#endif /* ALTQ */
2953 case PF_RULESET_TABLE:
2954 bzero(&table, sizeof(table));
2955 strlcpy(table.pfrt_anchor, ioe.anchor,
2956 sizeof(table.pfrt_anchor));
2957 if ((error = pfr_ina_rollback(&table,
2958 ioe.ticket, NULL, 0)))
2959 goto fail; /* really bad */
2960 break;
2961 default:
2962 if ((error = pf_rollback_rules(ioe.ticket,
2963 ioe.rs_num, ioe.anchor)))
2964 goto fail; /* really bad */
2965 break;
2966 }
2967 }
2968 break;
2969 }
2970
2971 case DIOCXCOMMIT: {
2972 struct pfioc_trans *io = (struct pfioc_trans *)
2973 addr;
2974 static struct pfioc_trans_e ioe;
2975 static struct pfr_table table;
2976 struct pf_ruleset *rs;
2977 int i;
2978
2979 if (io->esize != sizeof(ioe)) {
2980 error = ENODEV;
2981 goto fail;
2982 }
2983		/* first, make sure everything will succeed */
2984 for (i = 0; i < io->size; i++) {
2985#ifdef __FreeBSD__
2986 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
2987 if (error) {
2988#else
2989 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
2990#endif
2991 error = EFAULT;
2992 goto fail;
2993 }
2994 switch (ioe.rs_num) {
2995#ifdef ALTQ
2996 case PF_RULESET_ALTQ:
2997 if (ioe.anchor[0]) {
2998 error = EINVAL;
2999 goto fail;
3000 }
3001 if (!altqs_inactive_open || ioe.ticket !=
3002 ticket_altqs_inactive) {
3003 error = EBUSY;
3004 goto fail;
3005 }
3006 break;
3007#endif /* ALTQ */
3008 case PF_RULESET_TABLE:
3009 rs = pf_find_ruleset(ioe.anchor);
3010 if (rs == NULL || !rs->topen || ioe.ticket !=
3011 rs->tticket) {
3012 error = EBUSY;
3013 goto fail;
3014 }
3015 break;
3016 default:
3017 if (ioe.rs_num < 0 || ioe.rs_num >=
3018 PF_RULESET_MAX) {
3019 error = EINVAL;
3020 goto fail;
3021 }
3022 rs = pf_find_ruleset(ioe.anchor);
3023 if (rs == NULL ||
3024 !rs->rules[ioe.rs_num].inactive.open ||
3025 rs->rules[ioe.rs_num].inactive.ticket !=
3026 ioe.ticket) {
3027 error = EBUSY;
3028 goto fail;
3029 }
3030 break;
3031 }
3032 }
3033 /* now do the commit - no errors should happen here */
3034 for (i = 0; i < io->size; i++) {
3035#ifdef __FreeBSD__
3036 PF_COPYIN(io->array+i, &ioe, sizeof(ioe), error);
3037 if (error) {
3038#else
3039 if (copyin(io->array+i, &ioe, sizeof(ioe))) {
3040#endif
3041 error = EFAULT;
3042 goto fail;
3043 }
3044 switch (ioe.rs_num) {
3045#ifdef ALTQ
3046 case PF_RULESET_ALTQ:
3047 if ((error = pf_commit_altq(ioe.ticket)))
3048 goto fail; /* really bad */
3049 break;
3050#endif /* ALTQ */
3051 case PF_RULESET_TABLE:
3052 bzero(&table, sizeof(table));
3053 strlcpy(table.pfrt_anchor, ioe.anchor,
3054 sizeof(table.pfrt_anchor));
3055 if ((error = pfr_ina_commit(&table, ioe.ticket,
3056 NULL, NULL, 0)))
3057 goto fail; /* really bad */
3058 break;
3059 default:
3060 if ((error = pf_commit_rules(ioe.ticket,
3061 ioe.rs_num, ioe.anchor)))
3062 goto fail; /* really bad */
3063 break;
3064 }
3065 }
3066 break;
3067 }
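	/*
	 * The DIOCXBEGIN/DIOCXROLLBACK/DIOCXCOMMIT cases above implement a
	 * two-phase commit for ruleset loads.  A hedged sketch of the
	 * userland calling sequence (not part of the original source):
	 *
	 *	struct pfioc_trans_e te[1];
	 *	struct pfioc_trans t;
	 *
	 *	bzero(&te, sizeof(te));
	 *	bzero(&t, sizeof(t));
	 *	te[0].rs_num = PF_RULESET_FILTER;
	 *	t.size = 1;
	 *	t.esize = sizeof(te[0]);
	 *	t.array = te;
	 *	ioctl(dev, DIOCXBEGIN, &t);	(obtains te[0].ticket)
	 *	... load rules against te[0].ticket, e.g. via DIOCADDRULE ...
	 *	ioctl(dev, DIOCXCOMMIT, &t);	(atomic swap to the new set)
	 *
	 * On any intermediate failure, DIOCXROLLBACK with the same array
	 * discards the inactive ruleset instead.
	 */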
3068
3069 case DIOCGETSRCNODES: {
3070 struct pfioc_src_nodes *psn = (struct pfioc_src_nodes *)addr;
3071 struct pf_src_node *n;
3072 struct pf_src_node *p, pstore;
3073 u_int32_t nr = 0;
3074 int space = psn->psn_len;
3075
3076 if (space == 0) {
3077 RB_FOREACH(n, pf_src_tree, &tree_src_tracking)
3078 nr++;
3079 psn->psn_len = sizeof(struct pf_src_node) * nr;
3080 break;
3081 }
3082
3083 p = psn->psn_src_nodes;
3084 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3085 int secs = time_second, diff;
3086
3087 if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
3088 break;
3089
3090 bcopy(n, &pstore, sizeof(pstore));
3091 if (n->rule.ptr != NULL)
3092 pstore.rule.nr = n->rule.ptr->nr;
3093 pstore.creation = secs - pstore.creation;
3094 if (pstore.expire > secs)
3095 pstore.expire -= secs;
3096 else
3097 pstore.expire = 0;
3098
3099 /* adjust the connection rate estimate */
3100 diff = secs - n->conn_rate.last;
3101 if (diff >= n->conn_rate.seconds)
3102 pstore.conn_rate.count = 0;
3103 else
3104 pstore.conn_rate.count -=
3105 n->conn_rate.count * diff /
3106 n->conn_rate.seconds;
3107
3108#ifdef __FreeBSD__
3109 PF_COPYOUT(&pstore, p, sizeof(*p), error);
3110#else
3111 error = copyout(&pstore, p, sizeof(*p));
3112#endif
3113 if (error)
3114 goto fail;
3115 p++;
3116 nr++;
3117 }
3118 psn->psn_len = sizeof(struct pf_src_node) * nr;
3119 break;
3120 }
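	/*
	 * Worked example of the connection rate adjustment above (for
	 * illustration only, not part of the original source): with
	 * conn_rate.count == 9, conn_rate.seconds == 30 and the last
	 * connection seen diff == 10 seconds ago, the exported count is
	 * 9 - 9 * 10 / 30 == 6; once diff reaches the full 30 second
	 * window it is reported as 0.  Only the copy handed to userland
	 * decays this way, the stored counter is left untouched.
	 */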
3121
3122 case DIOCCLRSRCNODES: {
3123 struct pf_src_node *n;
3124 struct pf_state *state;
3125
3126 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3127 state->src_node = NULL;
3128 state->nat_src_node = NULL;
3129 }
3130 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3131 n->expire = 1;
3132 n->states = 0;
3133 }
3134 pf_purge_expired_src_nodes();
3135 pf_status.src_nodes = 0;
3136 break;
3137 }
3138
3139 case DIOCSETHOSTID: {
3140 u_int32_t *hostid = (u_int32_t *)addr;
3141
3142 if (*hostid == 0)
3143 pf_status.hostid = arc4random();
3144 else
3145 pf_status.hostid = *hostid;
3146 break;
3147 }
3148
3149 case DIOCOSFPFLUSH:
3150 pf_osfp_flush();
3151 break;
3152
3153 case DIOCIGETIFACES: {
3154 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3155
3156 if (io->pfiio_esize != sizeof(struct pfi_if)) {
3157 error = ENODEV;
3158 break;
3159 }
3160 error = pfi_get_ifaces(io->pfiio_name, io->pfiio_buffer,
3161 &io->pfiio_size, io->pfiio_flags);
3162 break;
3163 }
3164
3165 case DIOCICLRISTATS: {
3166 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3167
3168 error = pfi_clr_istats(io->pfiio_name, &io->pfiio_nzero,
3169 io->pfiio_flags);
3170 break;
3171 }
3172
3173 case DIOCSETIFFLAG: {
3174 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3175
3176 error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
3177 break;
3178 }
3179
3180 case DIOCCLRIFFLAG: {
3181 struct pfioc_iface *io = (struct pfioc_iface *)addr;
3182
3183 error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
3184 break;
3185 }
3186
3187 default:
3188 error = ENODEV;
3189 break;
3190 }
3191fail:
3192#ifdef __FreeBSD__
3193 PF_UNLOCK();
3194#else
3195 splx(s);
3196#endif
3197 return (error);
3198}
3199
3200#ifdef __FreeBSD__
3201/*
3202 * XXX - Check for version mismatch!!!
3203 */
3204static void
3205pf_clear_states(void)
3206{
3207 struct pf_state *state;
3208
3209 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3210 state->timeout = PFTM_PURGE;
3211#if NPFSYNC
3212 /* don't send out individual delete messages */
3213 state->sync_flags = PFSTATE_NOSYNC;
3214#endif
3215 }
3216 pf_purge_expired_states();
3217 pf_status.states = 0;
3218#if 0 /* NPFSYNC */
3219/*
3220 * XXX This is called on module unload; we do not want to sync that over?
3221 */
3222 pfsync_clear_states(pf_status.hostid, psk->psk_ifname);
3223#endif
3224}
3225
3226static int
3227pf_clear_tables(void)
3228{
3229 struct pfioc_table io;
3230 int error;
3231
3232 bzero(&io, sizeof(io));
3233
3234 error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
3235 io.pfrio_flags);
3236
3237 return (error);
3238}
3239
3240static void
3241pf_clear_srcnodes(void)
3242{
3243 struct pf_src_node *n;
3244 struct pf_state *state;
3245
3246 RB_FOREACH(state, pf_state_tree_id, &tree_id) {
3247 state->src_node = NULL;
3248 state->nat_src_node = NULL;
3249 }
3250 RB_FOREACH(n, pf_src_tree, &tree_src_tracking) {
3251 n->expire = 1;
3252 n->states = 0;
3253 }
3254 pf_purge_expired_src_nodes();
3255 pf_status.src_nodes = 0;
3256}
3257/*
3258 * XXX - Check for version mismatch!!!
3259 */
3260
3261/*
3262 * Duplicate pfctl -Fa operation to get rid of as much as we can.
3263 */
3264static int
3265shutdown_pf(void)
3266{
3267 int error = 0;
3268 u_int32_t t[5];
3269 char nn = '\0';
3270
3271 callout_stop(&pf_expire_to);
3272
3273 pf_status.running = 0;
3274 do {
3275 if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
3276 != 0) {
3277 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: SCRUB\n"));
3278 break;
3279 }
3280 if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
3281 != 0) {
3282 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: FILTER\n"));
3283 break; /* XXX: rollback? */
3284 }
3285 if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
3286 != 0) {
3287 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: NAT\n"));
3288 break; /* XXX: rollback? */
3289 }
3290 if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
3291 != 0) {
3292 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: BINAT\n"));
3293 break; /* XXX: rollback? */
3294 }
3295 if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
3296 != 0) {
3297 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: RDR\n"));
3298 break; /* XXX: rollback? */
3299 }
3300
3301 /* XXX: these should always succeed here */
3302 pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
3303 pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
3304 pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
3305 pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
3306 pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
3307
3308 if ((error = pf_clear_tables()) != 0)
3309 break;
3310
3311#ifdef ALTQ
3312 if ((error = pf_begin_altq(&t[0])) != 0) {
3313 DPFPRINTF(PF_DEBUG_MISC, ("shutdown_pf: ALTQ\n"));
3314 break;
3315 }
3316 pf_commit_altq(t[0]);
3317#endif
3318
3319 pf_clear_states();
3320
3321 pf_clear_srcnodes();
3322
3323	/* status does not use malloc'ed memory, so no cleanup is needed */
3324	/* fingerprints and interfaces have their own cleanup code */
3325 } while(0);
3326
3327 return (error);
3328}
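/*
 * For comparison (a hedged note, not from the original source): the
 * userland counterpart of shutdown_pf() above is roughly
 *
 *	# pfctl -F all
 *
 * which flushes rules, NAT/rdr rules, queues, states, source nodes and
 * tables; fingerprints and interfaces are skipped here because, as noted
 * above, they have their own cleanup code.
 */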
3329
3330static int
3331pf_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3332 struct inpcb *inp)
3333{
3334	/*
3335	 * XXX Wed Jul 9 22:03:16 2003 UTC
3336	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
3337	 * in its network stack.  It used to convert ip_len/ip_off to host
3338	 * byte order first, as FreeBSD still does, but that is no longer
3339	 * the case, so we convert them to network byte order before calling
3340	 * pf_test() and back to host byte order afterwards.
3341	 */
3342 struct ip *h = NULL;
3343 int chk;
3344
3345 if ((*m)->m_pkthdr.len >= (int)sizeof(struct ip)) {
3346		/* if m_pkthdr.len is shorter than the IP header, pf will handle it. */
3347 h = mtod(*m, struct ip *);
3348 HTONS(h->ip_len);
3349 HTONS(h->ip_off);
3350 }
3351 chk = pf_test(PF_IN, ifp, m, NULL, inp);
3352 if (chk && *m) {
3353 m_freem(*m);
3354 *m = NULL;
3355 }
3356 if (*m != NULL) {
3357 /* pf_test can change ip header location */
3358 h = mtod(*m, struct ip *);
3359 NTOHS(h->ip_len);
3360 NTOHS(h->ip_off);
3361 }
3362 return chk;
3363}
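/*
 * For clarity (a sketch, not part of the original source): the HTONS()
 * and NTOHS() macros used above convert their argument in place, i.e.
 *
 *	HTONS(h->ip_len);	is shorthand for   h->ip_len = htons(h->ip_len);
 *	NTOHS(h->ip_off);	is shorthand for   h->ip_off = ntohs(h->ip_off);
 *
 * so pf_test() sees ip_len/ip_off in network byte order and the mbuf is
 * returned to FreeBSD's host byte order convention afterwards.
 */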
3364
3365static int
3366pf_check_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3367 struct inpcb *inp)
3368{
3369	/*
3370	 * XXX Wed Jul 9 22:03:16 2003 UTC
3371	 * OpenBSD has changed its byte ordering convention on ip_len/ip_off
3372	 * in its network stack.  It used to convert ip_len/ip_off to host
3373	 * byte order first, as FreeBSD still does, but that is no longer
3374	 * the case, so we convert them to network byte order before calling
3375	 * pf_test() and back to host byte order afterwards.
3376	 */
3377 struct ip *h = NULL;
3378 int chk;
3379
3380	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3381 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3382 in_delayed_cksum(*m);
3383 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3384 }
3385 if ((*m)->m_pkthdr.len >= (int)sizeof(*h)) {
3386		/* if m_pkthdr.len is shorter than the IP header, pf will handle it. */
3387 h = mtod(*m, struct ip *);
3388 HTONS(h->ip_len);
3389 HTONS(h->ip_off);
3390 }
3391 chk = pf_test(PF_OUT, ifp, m, NULL, inp);
3392 if (chk && *m) {
3393 m_freem(*m);
3394 *m = NULL;
3395 }
3396 if (*m != NULL) {
3397 /* pf_test can change ip header location */
3398 h = mtod(*m, struct ip *);
3399 NTOHS(h->ip_len);
3400 NTOHS(h->ip_off);
3401 }
3402 return chk;
3403}
3404
3405#ifdef INET6
3406static int
3407pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3408 struct inpcb *inp)
3409{
3410 /*
3411	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
3412 */
3413 int chk;
3414
3415 chk = pf_test6(PF_IN, ifp, m, NULL, inp);
3416 if (chk && *m) {
3417 m_freem(*m);
3418 *m = NULL;
3419 }
3420 return chk;
3421}
3422
3423static int
3424pf_check6_out(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
3425 struct inpcb *inp)
3426{
3427 /*
3428	 * IPv6 is not affected by the ip_len/ip_off byte order changes.
3429 */
3430 int chk;
3431
3432	/* We need a proper checksum before we start (see OpenBSD ip_output) */
3433 if ((*m)->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
3434 in_delayed_cksum(*m);
3435 (*m)->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
3436 }
3437 chk = pf_test6(PF_OUT, ifp, m, NULL, inp);
3438 if (chk && *m) {
3439 m_freem(*m);
3440 *m = NULL;
3441 }
3442 return chk;
3443}
3444#endif /* INET6 */
3445
3446static int
3447hook_pf(void)
3448{
3449 struct pfil_head *pfh_inet;
3450#ifdef INET6
3451 struct pfil_head *pfh_inet6;
3452#endif
3453
3454 PF_ASSERT(MA_NOTOWNED);
3455
3456 if (pf_pfil_hooked)
3457 return (0);
3458
3459 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3460 if (pfh_inet == NULL)
3461 return (ESRCH); /* XXX */
3462 pfil_add_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet);
3463 pfil_add_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet);
3464#ifdef INET6
3465 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3466 if (pfh_inet6 == NULL) {
3467 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3468 pfh_inet);
3469 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3470 pfh_inet);
3471 return (ESRCH); /* XXX */
3472 }
3473 pfil_add_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK, pfh_inet6);
3474 pfil_add_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK, pfh_inet6);
3475#endif
3476
3477 pf_pfil_hooked = 1;
3478 return (0);
3479}
3480
3481static int
3482dehook_pf(void)
3483{
3484 struct pfil_head *pfh_inet;
3485#ifdef INET6
3486 struct pfil_head *pfh_inet6;
3487#endif
3488
3489 PF_ASSERT(MA_NOTOWNED);
3490
3491 if (pf_pfil_hooked == 0)
3492 return (0);
3493
3494 pfh_inet = pfil_head_get(PFIL_TYPE_AF, AF_INET);
3495 if (pfh_inet == NULL)
3496 return (ESRCH); /* XXX */
3497 pfil_remove_hook(pf_check_in, NULL, PFIL_IN | PFIL_WAITOK,
3498 pfh_inet);
3499 pfil_remove_hook(pf_check_out, NULL, PFIL_OUT | PFIL_WAITOK,
3500 pfh_inet);
3501#ifdef INET6
3502 pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
3503 if (pfh_inet6 == NULL)
3504 return (ESRCH); /* XXX */
3505 pfil_remove_hook(pf_check6_in, NULL, PFIL_IN | PFIL_WAITOK,
3506 pfh_inet6);
3507 pfil_remove_hook(pf_check6_out, NULL, PFIL_OUT | PFIL_WAITOK,
3508 pfh_inet6);
3509#endif
3510
3511 pf_pfil_hooked = 0;
3512 return (0);
3513}
3514
3515static int
3516pf_load(void)
3517{
3518 init_zone_var();
3519 init_pf_mutex();
3520 pf_dev = make_dev(&pf_cdevsw, 0, 0, 0, 0600, PF_NAME);
3521 if (pfattach() < 0) {
3522 destroy_dev(pf_dev);
3523 destroy_pf_mutex();
3524 return (ENOMEM);
3525 }
3526 return (0);
3527}
3528
3529static int
3530pf_unload(void)
3531{
3532 int error = 0;
3533
3534 PF_LOCK();
3535 pf_status.running = 0;
3536 PF_UNLOCK();
3537 error = dehook_pf();
3538 if (error) {
3539 /*
3540 * Should not happen!
3541 * XXX Due to error code ESRCH, kldunload will show
3542 * a message like 'No such process'.
3543 */
3544		printf("%s: pfil unregistration failed\n", __FUNCTION__);
3545 return error;
3546 }
3547 PF_LOCK();
3548 shutdown_pf();
3549 pfi_cleanup();
3550 pf_osfp_flush();
3551 pf_osfp_cleanup();
3552 cleanup_pf_zone();
3553 PF_UNLOCK();
3554 destroy_dev(pf_dev);
3555 destroy_pf_mutex();
3556 return error;
3557}
3558
3559static int
3560pf_modevent(module_t mod, int type, void *data)
3561{
3562 int error = 0;
3563
3564 switch(type) {
3565 case MOD_LOAD:
3566 error = pf_load();
3567 break;
3568
3569 case MOD_UNLOAD:
3570 error = pf_unload();
3571 break;
3572 default:
3573 error = EINVAL;
3574 break;
3575 }
3576 return error;
3577}
3578
3579static moduledata_t pf_mod = {
3580 "pf",
3581 pf_modevent,
3582 0
3583};
3584
3585DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_FIRST);
3586MODULE_VERSION(pf, PF_MODVER);
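/*
 * Usage note (not from the original source): the module declared above is
 * loaded and unloaded with kldload(8)/kldunload(8), e.g.
 *
 *	# kldload pf
 *	# kldunload pf
 *
 * pf_modevent() dispatches the resulting MOD_LOAD/MOD_UNLOAD events to
 * pf_load() and pf_unload() respectively.
 */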
3587#endif /* __FreeBSD__ */