/*	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>

__FBSDID("$FreeBSD: head/sys/netpfil/pf/pf.c 240737 2012-09-20 07:04:08Z glebius $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/radix_mpath.h>
#include <net/vnet.h>

#include <net/pfvar.h>
#include <net/pf_mtag.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

/*
 * Global variables
 */

/* state tables */
VNET_DEFINE(struct pf_altqqueue, pf_altqs[2]);
VNET_DEFINE(struct pf_palist, pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
VNET_DEFINE(struct pf_status, pf_status);

VNET_DEFINE(u_int32_t, ticket_altqs_active);
VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
VNET_DEFINE(int, altqs_inactive_open);
VNET_DEFINE(u_int32_t, ticket_pabuf);

VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx	VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char, pf_tcp_secret[16]);
#define	V_pf_tcp_secret		VNET(pf_tcp_secret)
VNET_DEFINE(int, pf_tcp_secret_init);
#define	V_pf_tcp_secret_init	VNET(pf_tcp_secret_init)
VNET_DEFINE(int, pf_tcp_iss_off);
#define	V_pf_tcp_iss_off	VNET(pf_tcp_iss_off)

/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	}				pfse_type;
	union {
		struct route	ro;
		struct {
			int	type;
			int	code;
			int	mtu;
		} icmpopts;
	} u;
#define	pfse_ro		u.ro
#define	pfse_icmp_type	u.icmpopts.type
#define	pfse_icmp_code	u.icmpopts.code
#define	pfse_icmp_mtu	u.icmpopts.mtu
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)

/*
 * Queue for pf_flush_task() tasks.
 */
struct pf_flush_entry {
	SLIST_ENTRY(pf_flush_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule			*rule;	/* never dereferenced */
};

SLIST_HEAD(pf_flush_head, pf_flush_entry);
static VNET_DEFINE(struct pf_flush_head, pf_flushqueue);
#define	V_pf_flushqueue	VNET(pf_flushqueue)
static VNET_DEFINE(struct task, pf_flushtask);
#define	V_pf_flushtask	VNET(pf_flushtask)

static struct mtx pf_flushqueue_mtx;
#define	PF_FLUSHQ_LOCK()	mtx_lock(&pf_flushqueue_mtx)
#define	PF_FLUSHQ_UNLOCK()	mtx_unlock(&pf_flushqueue_mtx)

VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;

static VNET_DEFINE(uma_zone_t, pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
static VNET_DEFINE(uma_zone_t, pf_mtag_z);
#define	V_pf_mtag_z	VNET(pf_mtag_z)
VNET_DEFINE(uma_zone_t, pf_state_z);
VNET_DEFINE(uma_zone_t, pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) > MAXCPU);
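
/*
 * Added note (not in the original revision): a state ID is a 64-bit
 * value whose top PFID_CPUBITS (8) bits carry the CPU that allocated
 * it and whose remaining bits are a per-CPU counter. For example, an
 * ID minted on CPU 2 reads 0x02xxxxxxxxxxxxxx before the htobe64()
 * applied in pf_state_insert().
 */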

static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static void		 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_flush_task(void *c, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static int		 pf_purge_expired_states(int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_init(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

VNET_DECLARE(int, pf_end_threads);

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL || (s)->timeout == PFTM_PURGE)		\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)				\
	do {						\
		s->rule.ptr->states_cur++;		\
		s->rule.ptr->states_tot++;		\
		if (s->anchor.ptr != NULL) {		\
			s->anchor.ptr->states_cur++;	\
			s->anchor.ptr->states_tot++;	\
		}					\
		if (s->nat_rule.ptr != NULL) {		\
			s->nat_rule.ptr->states_cur++;	\
			s->nat_rule.ptr->states_tot++;	\
		}					\
	} while (0)

#define	STATE_DEC_COUNTERS(s)				\
	do {						\
		if (s->nat_rule.ptr != NULL)		\
			s->nat_rule.ptr->states_cur--;	\
		if (s->anchor.ptr != NULL)		\
			s->anchor.ptr->states_cur--;	\
		s->rule.ptr->states_cur--;		\
	} while (0)

static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(u_long, pf_hashmask);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
VNET_DEFINE(u_long, pf_srchashmask);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

VNET_DEFINE(u_long, pf_hashsize);
#define	V_pf_hashsize	VNET(pf_hashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");

VNET_DEFINE(u_long, pf_srchashsize);
#define	V_pf_srchashsize	VNET(pf_srchashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)

static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = jenkins_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & V_pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = jenkins_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = jenkins_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & V_pf_srchashmask);
}
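
/*
 * Added note: both hashes above are mixed with the per-boot random
 * V_pf_hashseed, which should make bucket placement hard to predict
 * from outside. pf_hashkey() hashes only the pf_state_key_cmp prefix
 * of a key, the same prefix that lookups compare with bcmp().
 */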

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */

static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}
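
/*
 * Added example: the threshold is a fixed-point rate estimator.
 * With limit = 10 and seconds = 5, pf_init_threshold() stores
 * 10 * PF_THRESHOLD_MULT; each pf_add_threshold() first decays the
 * count linearly across the 5-second window and then adds
 * PF_THRESHOLD_MULT, so pf_check_threshold() fires roughly once more
 * than 10 connections arrive within the window.
 */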

static int
pf_src_connlimit(struct pf_state **state)
{
	struct pfr_addr p;
	struct pf_flush_entry *pffe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		V_pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
	if (V_pf_status.debug >= PF_DEBUG_MISC) {
		printf("%s: blocking address ", __func__);
		pf_print_host(&(*state)->src_node->addr, 0,
		    (*state)->key[PF_SK_WIRE]->af);
		printf("\n");
	}

	bzero(&p, sizeof(p));
	p.pfra_af = (*state)->key[PF_SK_WIRE]->af;
	switch ((*state)->key[PF_SK_WIRE]->af) {
#ifdef INET
	case AF_INET:
		p.pfra_net = 32;
		p.pfra_ip4addr = (*state)->src_node->addr.v4;
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		p.pfra_net = 128;
		p.pfra_ip6addr = (*state)->src_node->addr.v6;
		break;
#endif /* INET6 */
	}

	pfr_insert_kentry((*state)->rule.ptr->overload_tbl, &p, time_second);

	if ((*state)->rule.ptr->flush == 0)
		return (1);

	/* Schedule flushing task. */
	pffe = malloc(sizeof(*pffe), M_PFTEMP, M_NOWAIT);
	if (pffe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pffe->addr, sizeof(pffe->addr));
	pffe->af = (*state)->key[PF_SK_WIRE]->af;
	pffe->dir = (*state)->direction;
	if ((*state)->rule.ptr->flush & PF_FLUSH_GLOBAL)
		pffe->rule = NULL;
	else
		pffe->rule = (*state)->rule.ptr;
	PF_FLUSHQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_flushqueue, pffe, next);
	PF_FLUSHQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_flushtask);

	return (1);
}

static void
pf_flush_task(void *c, int pending)
{
	struct pf_flush_head queue;
	struct pf_flush_entry *pffe, *pffe1;
	uint32_t killed = 0;

	PF_FLUSHQ_LOCK();
	queue = *(struct pf_flush_head *)c;
	SLIST_INIT((struct pf_flush_head *)c);
	PF_FLUSHQ_UNLOCK();

	V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pffe, &queue, next)
				if (sk->af == pffe->af && (pffe->rule == NULL ||
				    pffe->rule == s->rule.ptr) &&
				    ((pffe->dir == PF_OUT &&
				    PF_AEQ(&pffe->addr, &sk->addr[1], sk->af)) ||
				    (pffe->dir == PF_IN &&
				    PF_AEQ(&pffe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pffe, &queue, next, pffe1)
		free(pffe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);
}
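
/*
 * Added note: pf_flush_task() runs from taskqueue_swi context and
 * implements the "overload ... flush" behaviour: each queued entry
 * marks the states created by the offending rule (any rule, if
 * PF_FLUSH_GLOBAL was set and pffe->rule is NULL) whose wire-side
 * address matches the blocked host. States are only marked
 * PFTM_PURGE here; the purge thread reaps them later.
 */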

/*
 * Can return locked on failure, so that we can consistently
 * allocate and insert a new one.
 */
struct pf_src_node *
pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af,
    int returnlocked)
{
	struct pf_srchash *sh;
	struct pf_src_node *n;

	V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;

	sh = &V_pf_srchash[pf_hashsrc(src, af)];
	PF_HASHROW_LOCK(sh);
	LIST_FOREACH(n, &sh->nodes, entry)
		if (n->rule.ptr == rule && n->af == af &&
		    ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
		    (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
			break;
	if (n != NULL || returnlocked == 0)
		PF_HASHROW_UNLOCK(sh);

	return (n);
}

static int
pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
    struct pf_addr *src, sa_family_t af)
{

	KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK ||
	    rule->rpool.opts & PF_POOL_STICKYADDR),
	    ("%s for non-tracking rule %p", __func__, rule));

	if (*sn == NULL)
		*sn = pf_find_src_node(src, rule, af, 1);

	if (*sn == NULL) {
		struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];

		PF_HASHROW_ASSERT(sh);

		if (!rule->max_src_nodes ||
		    rule->src_nodes < rule->max_src_nodes)
			(*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
		else
			V_pf_status.lcounters[LCNT_SRCNODES]++;
		if ((*sn) == NULL) {
			PF_HASHROW_UNLOCK(sh);
			return (-1);
		}

		pf_init_threshold(&(*sn)->conn_rate,
		    rule->max_src_conn_rate.limit,
		    rule->max_src_conn_rate.seconds);

		(*sn)->af = af;
		(*sn)->rule.ptr = rule;
		PF_ACPY(&(*sn)->addr, src, af);
		LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
		(*sn)->creation = time_uptime;
		(*sn)->ruletype = rule->action;
		if ((*sn)->rule.ptr != NULL)
			(*sn)->rule.ptr->src_nodes++;
		PF_HASHROW_UNLOCK(sh);
		V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
		V_pf_status.src_nodes++;
	} else {
		if (rule->max_src_states &&
		    (*sn)->states >= rule->max_src_states) {
			V_pf_status.lcounters[LCNT_SRCSTATES]++;
			return (-1);
		}
	}
	return (0);
}

static void
pf_remove_src_node(struct pf_src_node *src)
{
	struct pf_srchash *sh;

	sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)];
	PF_HASHROW_LOCK(sh);
	LIST_REMOVE(src, entry);
	PF_HASHROW_UNLOCK(sh);

	V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
	V_pf_status.src_nodes--;

	uma_zfree(V_pf_sources_z, src);
}

/* Data storage structures initialization. */
void
pf_initialize()
{
	struct pf_keyhash *kh;
	struct pf_idhash *ih;
	struct pf_srchash *sh;
	u_int i;

	TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize);
	if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize))
		V_pf_hashsize = PF_HASHSIZ;
	TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize);
	if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize))
		V_pf_srchashsize = PF_HASHSIZ / 4;

	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_hashmask = V_pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash),
	    M_PFHASH, M_WAITOK|M_ZERO);
	V_pf_srchashmask = V_pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Mbuf tags */
	V_pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_init, NULL,
	    UMA_ALIGN_PTR, 0);

	/* Send & flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_flushqueue);
	TASK_INIT(&V_pf_flushtask, 0, pf_flush_task, &V_pf_flushqueue);
	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
	mtx_init(&pf_flushqueue_mtx, "pf flush queue", NULL, MTX_DEF);

	/* Unlinked, but may be referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
}

void
pf_cleanup()
{
	struct pf_keyhash *kh;
	struct pf_idhash *ih;
	struct pf_srchash *sh;
	struct pf_send_entry *pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	mtx_destroy(&pf_sendqueue_mtx);
	mtx_destroy(&pf_flushqueue_mtx);
	mtx_destroy(&pf_unlnkdrules_mtx);

	uma_zdestroy(V_pf_mtag_z);
	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}

static int
pf_mtag_init(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(V_pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(V_pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}
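
/*
 * Added note: a state carries up to two keys. PF_SK_WIRE describes
 * the addresses and ports as they appear on the wire, PF_SK_STACK as
 * the local stack sees them. Without NAT the two are identical and
 * pf_state_key_attach() below is entered with sks == skw, making
 * both slots point at a single key.
 */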
886 "wire" : "stack", 887 s->kif->pfik_name); 888 pf_print_state_parts(s, 889 (idx == PF_SK_WIRE) ? 890 sk : NULL, 891 (idx == PF_SK_STACK) ? 892 sk : NULL); 893 printf(", existing: "); 894 pf_print_state_parts(si, 895 (idx == PF_SK_WIRE) ? 896 sk : NULL, 897 (idx == PF_SK_STACK) ? 898 sk : NULL); 899 printf("\n"); 900 } 901 PF_HASHROW_UNLOCK(ih); 902 PF_HASHROW_UNLOCK(kh); 903 uma_zfree(V_pf_state_key_z, sk); 904 if (idx == PF_SK_STACK) 905 pf_detach_state(s); 906 return (-1); /* collision! */ 907 } 908 } 909 PF_HASHROW_UNLOCK(ih); 910 } 911 uma_zfree(V_pf_state_key_z, sk); 912 s->key[idx] = cur; 913 } else { 914 LIST_INSERT_HEAD(&kh->keys, sk, entry); 915 s->key[idx] = sk; 916 } 917 918stateattach: 919 /* List is sorted, if-bound states before floating. */ 920 if (s->kif == V_pfi_all) 921 TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]); 922 else 923 TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]); 924 925 /* 926 * Attach done. See how should we (or should not?) 927 * attach a second key. 928 */ 929 if (sks == skw) { 930 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE]; 931 idx = PF_SK_STACK; 932 sks = NULL; 933 goto stateattach; 934 } else if (sks != NULL) { 935 PF_HASHROW_UNLOCK(kh); 936 if (olds) { 937 pf_unlink_state(olds, 0); 938 pf_release_state(olds); 939 olds = NULL; 940 } 941 /* 942 * Continue attaching with stack key. 943 */ 944 sk = sks; 945 idx = PF_SK_STACK; 946 sks = NULL; 947 goto keyattach; 948 } else 949 PF_HASHROW_UNLOCK(kh); 950 951 if (olds) { 952 pf_unlink_state(olds, 0); 953 pf_release_state(olds); 954 } 955 956 KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL, 957 ("%s failure", __func__)); 958 959 return (0); 960} 961 962static void 963pf_detach_state(struct pf_state *s) 964{ 965 struct pf_state_key *sks = s->key[PF_SK_STACK]; 966 struct pf_keyhash *kh; 967 968 if (sks != NULL) { 969 kh = &V_pf_keyhash[pf_hashkey(sks)]; 970 PF_HASHROW_LOCK(kh); 971 if (s->key[PF_SK_STACK] != NULL) 972 pf_state_key_detach(s, PF_SK_STACK); 973 /* 974 * If both point to same key, then we are done. 
975 */ 976 if (sks == s->key[PF_SK_WIRE]) { 977 pf_state_key_detach(s, PF_SK_WIRE); 978 PF_HASHROW_UNLOCK(kh); 979 return; 980 } 981 PF_HASHROW_UNLOCK(kh); 982 } 983 984 if (s->key[PF_SK_WIRE] != NULL) { 985 kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])]; 986 PF_HASHROW_LOCK(kh); 987 if (s->key[PF_SK_WIRE] != NULL) 988 pf_state_key_detach(s, PF_SK_WIRE); 989 PF_HASHROW_UNLOCK(kh); 990 } 991} 992 993static void 994pf_state_key_detach(struct pf_state *s, int idx) 995{ 996 struct pf_state_key *sk = s->key[idx]; 997#ifdef INVARIANTS 998 struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)]; 999 1000 PF_HASHROW_ASSERT(kh); 1001#endif 1002 TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]); 1003 s->key[idx] = NULL; 1004 1005 if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) { 1006 LIST_REMOVE(sk, entry); 1007 uma_zfree(V_pf_state_key_z, sk); 1008 } 1009} 1010 1011static int 1012pf_state_key_ctor(void *mem, int size, void *arg, int flags) 1013{ 1014 struct pf_state_key *sk = mem; 1015 1016 bzero(sk, sizeof(struct pf_state_key_cmp)); 1017 TAILQ_INIT(&sk->states[PF_SK_WIRE]); 1018 TAILQ_INIT(&sk->states[PF_SK_STACK]); 1019 1020 return (0); 1021} 1022 1023struct pf_state_key * 1024pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr, 1025 struct pf_addr *daddr, u_int16_t sport, u_int16_t dport) 1026{ 1027 struct pf_state_key *sk; 1028 1029 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); 1030 if (sk == NULL) 1031 return (NULL); 1032 1033 PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af); 1034 PF_ACPY(&sk->addr[pd->didx], daddr, pd->af); 1035 sk->port[pd->sidx] = sport; 1036 sk->port[pd->didx] = dport; 1037 sk->proto = pd->proto; 1038 sk->af = pd->af; 1039 1040 return (sk); 1041} 1042 1043struct pf_state_key * 1044pf_state_key_clone(struct pf_state_key *orig) 1045{ 1046 struct pf_state_key *sk; 1047 1048 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); 1049 if (sk == NULL) 1050 return (NULL); 1051 1052 bcopy(orig, sk, sizeof(struct pf_state_key_cmp)); 1053 1054 return (sk); 1055} 1056 1057int 1058pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw, 1059 struct pf_state_key *sks, struct pf_state *s) 1060{ 1061 struct pf_idhash *ih; 1062 struct pf_state *cur; 1063 1064 KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]), 1065 ("%s: sks not pristine", __func__)); 1066 KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]), 1067 ("%s: skw not pristine", __func__)); 1068 KASSERT(s->refs == 0, ("%s: state not pristine", __func__)); 1069 1070 s->kif = kif; 1071 1072 if (pf_state_key_attach(skw, sks, s)) 1073 return (-1); 1074 1075 if (s->id == 0 && s->creatorid == 0) { 1076 /* XXX: should be atomic, but probability of collision low */ 1077 if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID) 1078 V_pf_stateid[curcpu] = 1; 1079 s->id |= (uint64_t )curcpu << PFID_CPUSHIFT; 1080 s->id = htobe64(s->id); 1081 s->creatorid = V_pf_status.hostid; 1082 } 1083 1084 ih = &V_pf_idhash[PF_IDHASH(s)]; 1085 PF_HASHROW_LOCK(ih); 1086 LIST_FOREACH(cur, &ih->states, entry) 1087 if (cur->id == s->id && cur->creatorid == s->creatorid) 1088 break; 1089 1090 if (cur != NULL) { 1091 PF_HASHROW_UNLOCK(ih); 1092 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1093 printf("pf: state insert failed: " 1094 "id: %016llx creatorid: %08x", 1095 (unsigned long long)be64toh(s->id), 1096 ntohl(s->creatorid)); 1097 printf("\n"); 1098 } 1099 pf_detach_state(s); 1100 return (-1); 1101 } 1102 LIST_INSERT_HEAD(&ih->states, s, entry); 1103 /* One for keys, one for ID hash. 

int
pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw,
    struct pf_state_key *sks, struct pf_state *s)
{
	struct pf_idhash *ih;
	struct pf_state *cur;

	KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
	    ("%s: sks not pristine", __func__));
	KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
	    ("%s: skw not pristine", __func__));
	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));

	s->kif = kif;

	if (pf_state_key_attach(skw, sks, s))
		return (-1);

	if (s->id == 0 && s->creatorid == 0) {
		/* XXX: should be atomic, but probability of collision low */
		if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID)
			V_pf_stateid[curcpu] = 1;
		s->id |= (uint64_t)curcpu << PFID_CPUSHIFT;
		s->id = htobe64(s->id);
		s->creatorid = V_pf_status.hostid;
	}

	ih = &V_pf_idhash[PF_IDHASH(s)];
	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(cur, &ih->states, entry)
		if (cur->id == s->id && cur->creatorid == s->creatorid)
			break;

	if (cur != NULL) {
		PF_HASHROW_UNLOCK(ih);
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("pf: state insert failed: "
			    "id: %016llx creatorid: %08x",
			    (unsigned long long)be64toh(s->id),
			    ntohl(s->creatorid));
			printf("\n");
		}
		pf_detach_state(s);
		return (-1);
	}
	LIST_INSERT_HEAD(&ih->states, s, entry);
	/* One for keys, one for ID hash. */
	refcount_init(&s->refs, 2);

	V_pf_status.fcounters[FCNT_STATE_INSERT]++;
	if (pfsync_insert_state_ptr != NULL)
		pfsync_insert_state_ptr(s);

	/* Returns locked. */
	return (0);
}

/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash *kh;
	struct pf_state_key *sk;
	struct pf_state *s;
	int idx;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout == PFTM_UNLINKED) {
				/*
				 * State is being processed
				 * by pf_unlink_state() in
				 * another thread.
				 */
				PF_STATE_UNLOCK(s);
				return (NULL);
			}
			return (s);
		}
	PF_HASHROW_UNLOCK(kh);

	return (NULL);
}

struct pf_state *
pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
{
	struct pf_keyhash *kh;
	struct pf_state_key *sk;
	struct pf_state *s, *ret = NULL;
	int idx, inout = 0;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}
	switch (dir) {
	case PF_IN:
		idx = PF_SK_WIRE;
		break;
	case PF_OUT:
		idx = PF_SK_STACK;
		break;
	case PF_INOUT:
		idx = PF_SK_WIRE;
		inout = 1;
		break;
	default:
		panic("%s: dir %u", __func__, dir);
	}
second_run:
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
		if (more == NULL) {
			PF_HASHROW_UNLOCK(kh);
			return (s);
		}

		if (ret)
			(*more)++;
		else
			ret = s;
	}
	if (inout == 1) {
		inout = 0;
		idx = PF_SK_STACK;
		goto second_run;
	}
	PF_HASHROW_UNLOCK(kh);

	return (ret);
}

/* END state table stuff */
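
/*
 * Added note: pf_send() below does not transmit inline. It queues the
 * packet and schedules a software interrupt, so ip_output() and
 * ip6_output() run later from pf_intr() instead of from within
 * packet classification.
 */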

static void
pf_send(struct pf_send_entry *pfse)
{

	PF_SENDQ_LOCK();
	STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
	PF_SENDQ_UNLOCK();
	swi_sched(V_pf_swi_cookie, 0);
}

void
pf_intr(void *v)
{
	struct pf_send_head queue;
	struct pf_send_entry *pfse, *next;

	CURVNET_SET((struct vnet *)v);

	PF_SENDQ_LOCK();
	queue = V_pf_sendqueue;
	STAILQ_INIT(&V_pf_sendqueue);
	PF_SENDQ_UNLOCK();

	STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
		switch (pfse->pfse_type) {
#ifdef INET
		case PFSE_IP:
			ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL);
			break;
		case PFSE_ICMP:
			icmp_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu);
			break;
#endif /* INET */
#ifdef INET6
		case PFSE_IP6:
			ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL,
			    NULL);
			break;
		case PFSE_ICMP6:
			icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type,
			    pfse->pfse_icmp_code, pfse->pfse_icmp_mtu);
			break;
#endif /* INET6 */
		default:
			panic("%s: unknown type", __func__);
		}
		free(pfse, M_PFTEMP);
	}
	CURVNET_RESTORE();
}

void
pf_purge_thread(void *v)
{
	int fullrun;

	CURVNET_SET((struct vnet *)v);

	for (;;) {
		PF_RULES_RLOCK();
		rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10);

		if (V_pf_end_threads) {
			/*
			 * To clean up all kifs and rules we need
			 * two runs: the first one clears reference
			 * flags, so that pf_purge_expired_states()
			 * doesn't raise them again, and the second
			 * run frees.
			 */
			PF_RULES_RUNLOCK();
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Now purge everything.
			 */
			pf_purge_expired_states(V_pf_hashmask + 1);
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();

			/*
			 * Now all kifs & rules should be unreferenced,
			 * thus should be successfully freed.
			 */
			pf_purge_unlinked_rules();
			pfi_kif_purge();

			/*
			 * Announce success and exit.
			 */
			PF_RULES_RLOCK();
			V_pf_end_threads++;
			PF_RULES_RUNLOCK();
			wakeup(pf_purge_thread);
			kproc_exit(0);
		}
		PF_RULES_RUNLOCK();

		/* Process 1/interval fraction of the state table every run. */
		fullrun = pf_purge_expired_states(V_pf_hashmask /
		    (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));

		/* Purge other expired types every PFTM_INTERVAL seconds. */
		if (fullrun) {
			/*
			 * Order is important:
			 * - states and src nodes reference rules
			 * - states and rules reference kifs
			 */
			pf_purge_expired_fragments();
			pf_purge_expired_src_nodes();
			pf_purge_unlinked_rules();
			pfi_kif_purge();
		}
	}
	/* not reached */
	CURVNET_RESTORE();
}

u_int32_t
pf_state_expires(const struct pf_state *state)
{
	u_int32_t timeout;
	u_int32_t start;
	u_int32_t end;
	u_int32_t states;

	/* handle all PFTM_* > PFTM_MAX here */
	if (state->timeout == PFTM_PURGE)
		return (time_uptime);
	if (state->timeout == PFTM_UNTIL_PACKET)
		return (0);
	KASSERT(state->timeout != PFTM_UNLINKED,
	    ("pf_state_expires: timeout == PFTM_UNLINKED"));
	KASSERT((state->timeout < PFTM_MAX),
	    ("pf_state_expires: timeout > PFTM_MAX"));
	timeout = state->rule.ptr->timeout[state->timeout];
	if (!timeout)
		timeout = V_pf_default_rule.timeout[state->timeout];
	start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
	if (start) {
		end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
		states = state->rule.ptr->states_cur;	/* XXXGL */
	} else {
		start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
		end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
		states = V_pf_status.states;
	}
	if (end && states > start && start < end) {
		if (states < end)
			return (state->expire + timeout * (end - states) /
			    (end - start));
		else
			return (time_uptime);
	}
	return (state->expire + timeout);
}
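
/*
 * Added example for the adaptive scaling above: with
 * adaptive.start = 6000, adaptive.end = 12000 and a base timeout of
 * 3600 seconds, 9000 tracked states shrink the timeout to
 * 3600 * (12000 - 9000) / (12000 - 6000) = 1800 seconds.
 */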

void
pf_purge_expired_src_nodes()
{
	struct pf_srchash *sh;
	struct pf_src_node *cur, *next;
	int i;

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states <= 0 && cur->expire <= time_uptime) {
				if (cur->rule.ptr != NULL)
					cur->rule.ptr->src_nodes--;
				LIST_REMOVE(cur, entry);
				V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
				V_pf_status.src_nodes--;
				uma_zfree(V_pf_sources_z, cur);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}
}

static void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_uptime + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_uptime + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	s->timeout = PFTM_UNLINKED;

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);
	PF_HASHROW_UNLOCK(ih);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}

void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));
	--cur->rule.ptr->states_cur;
	if (cur->nat_rule.ptr != NULL)
		--cur->nat_rule.ptr->states_cur;
	if (cur->anchor.ptr != NULL)
		--cur->anchor.ptr->states_cur;
	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
}

/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static int
pf_purge_expired_states(int maxcheck)
{
	static u_int i = 0;

	struct pf_idhash *ih;
	struct pf_state *s;
	int rv = 0;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		/* Wrap to start of hash when we hit the end. */
		if (i > V_pf_hashmask) {
			i = 0;
			rv = 1;
		}

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);
		i++;
		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (rv);
}

static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}

void
pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET: {
		u_int32_t a = ntohl(addr->addr32[0]);
		printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
		    (a>>8)&255, a&255);
		if (p) {
			p = ntohs(p);
			printf(":%u", p);
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		u_int16_t b;
		u_int8_t i, curstart, curend, maxstart, maxend;
		curstart = curend = maxstart = maxend = 255;
		for (i = 0; i < 8; i++) {
			if (!addr->addr16[i]) {
				if (curstart == 255)
					curstart = i;
				curend = i;
			} else {
				if ((curend - curstart) >
				    (maxend - maxstart)) {
					maxstart = curstart;
					maxend = curend;
				}
				curstart = curend = 255;
			}
		}
		if ((curend - curstart) >
		    (maxend - maxstart)) {
			maxstart = curstart;
			maxend = curend;
		}
		for (i = 0; i < 8; i++) {
			if (i >= maxstart && i <= maxend) {
				if (i == 0)
					printf(":");
				if (i == maxend)
					printf(":");
			} else {
				b = ntohs(addr->addr16[i]);
				printf("%x", b);
				if (i < 7)
					printf(":");
			}
		}
		if (p) {
			p = ntohs(p);
			printf("[%u]", p);
		}
		break;
	}
#endif /* INET6 */
	}
}

void
pf_print_state(struct pf_state *s)
{
	pf_print_state_parts(s, NULL, NULL);
}

static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}
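
/*
 * Added note: skip steps, computed below, are an evaluation
 * shortcut. For each field (interface, direction, af, proto,
 * addresses, ports) every rule points at the next rule that differs
 * in that field, so a failed comparison lets the evaluator leap over
 * the whole run of rules sharing that value instead of testing each
 * one.
 */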

#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
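
/*
 * Added note: pf_cksum_fixup() above is an incremental Internet
 * checksum update in the spirit of RFC 1624, folding
 * (cksum + old - new) back into 16 bits rather than recomputing over
 * the packet. The udp special cases keep the UDP convention that a
 * zero checksum means "none": an absent checksum stays 0, and a
 * computed 0 goes out as 0xFFFF.
 */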
Uses a void * so there are no align restrictions */ 1919void 1920pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u) 1921{ 1922 u_int32_t ao; 1923 1924 memcpy(&ao, a, sizeof(ao)); 1925 memcpy(a, &an, sizeof(u_int32_t)); 1926 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u), 1927 ao % 65536, an % 65536, u); 1928} 1929 1930#ifdef INET6 1931static void 1932pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u) 1933{ 1934 struct pf_addr ao; 1935 1936 PF_ACPY(&ao, a, AF_INET6); 1937 PF_ACPY(a, an, AF_INET6); 1938 1939 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1940 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1941 pf_cksum_fixup(pf_cksum_fixup(*c, 1942 ao.addr16[0], an->addr16[0], u), 1943 ao.addr16[1], an->addr16[1], u), 1944 ao.addr16[2], an->addr16[2], u), 1945 ao.addr16[3], an->addr16[3], u), 1946 ao.addr16[4], an->addr16[4], u), 1947 ao.addr16[5], an->addr16[5], u), 1948 ao.addr16[6], an->addr16[6], u), 1949 ao.addr16[7], an->addr16[7], u); 1950} 1951#endif /* INET6 */ 1952 1953static void 1954pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, 1955 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c, 1956 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af) 1957{ 1958 struct pf_addr oia, ooa; 1959 1960 PF_ACPY(&oia, ia, af); 1961 if (oa) 1962 PF_ACPY(&ooa, oa, af); 1963 1964 /* Change inner protocol port, fix inner protocol checksum. */ 1965 if (ip != NULL) { 1966 u_int16_t oip = *ip; 1967 u_int32_t opc; 1968 1969 if (pc != NULL) 1970 opc = *pc; 1971 *ip = np; 1972 if (pc != NULL) 1973 *pc = pf_cksum_fixup(*pc, oip, *ip, u); 1974 *ic = pf_cksum_fixup(*ic, oip, *ip, 0); 1975 if (pc != NULL) 1976 *ic = pf_cksum_fixup(*ic, opc, *pc, 0); 1977 } 1978 /* Change inner ip address, fix inner ip and icmp checksums. */ 1979 PF_ACPY(ia, na, af); 1980 switch (af) { 1981#ifdef INET 1982 case AF_INET: { 1983 u_int32_t oh2c = *h2c; 1984 1985 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c, 1986 oia.addr16[0], ia->addr16[0], 0), 1987 oia.addr16[1], ia->addr16[1], 0); 1988 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, 1989 oia.addr16[0], ia->addr16[0], 0), 1990 oia.addr16[1], ia->addr16[1], 0); 1991 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0); 1992 break; 1993 } 1994#endif /* INET */ 1995#ifdef INET6 1996 case AF_INET6: 1997 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1998 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 1999 pf_cksum_fixup(pf_cksum_fixup(*ic, 2000 oia.addr16[0], ia->addr16[0], u), 2001 oia.addr16[1], ia->addr16[1], u), 2002 oia.addr16[2], ia->addr16[2], u), 2003 oia.addr16[3], ia->addr16[3], u), 2004 oia.addr16[4], ia->addr16[4], u), 2005 oia.addr16[5], ia->addr16[5], u), 2006 oia.addr16[6], ia->addr16[6], u), 2007 oia.addr16[7], ia->addr16[7], u); 2008 break; 2009#endif /* INET6 */ 2010 } 2011 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. 
*/ 2012 if (oa) { 2013 PF_ACPY(oa, na, af); 2014 switch (af) { 2015#ifdef INET 2016 case AF_INET: 2017 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc, 2018 ooa.addr16[0], oa->addr16[0], 0), 2019 ooa.addr16[1], oa->addr16[1], 0); 2020 break; 2021#endif /* INET */ 2022#ifdef INET6 2023 case AF_INET6: 2024 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2025 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2026 pf_cksum_fixup(pf_cksum_fixup(*ic, 2027 ooa.addr16[0], oa->addr16[0], u), 2028 ooa.addr16[1], oa->addr16[1], u), 2029 ooa.addr16[2], oa->addr16[2], u), 2030 ooa.addr16[3], oa->addr16[3], u), 2031 ooa.addr16[4], oa->addr16[4], u), 2032 ooa.addr16[5], oa->addr16[5], u), 2033 ooa.addr16[6], oa->addr16[6], u), 2034 ooa.addr16[7], oa->addr16[7], u); 2035 break; 2036#endif /* INET6 */ 2037 } 2038 } 2039} 2040 2041 2042/* 2043 * Need to modulate the sequence numbers in the TCP SACK option 2044 * (credits to Krzysztof Pfaff for report and patch) 2045 */ 2046static int 2047pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd, 2048 struct tcphdr *th, struct pf_state_peer *dst) 2049{ 2050 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen; 2051 u_int8_t opts[TCP_MAXOLEN], *opt = opts; 2052 int copyback = 0, i, olen; 2053 struct sackblk sack; 2054 2055#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2) 2056 if (hlen < TCPOLEN_SACKLEN || 2057 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af)) 2058 return 0; 2059 2060 while (hlen >= TCPOLEN_SACKLEN) { 2061 olen = opt[1]; 2062 switch (*opt) { 2063 case TCPOPT_EOL: /* FALLTHROUGH */ 2064 case TCPOPT_NOP: 2065 opt++; 2066 hlen--; 2067 break; 2068 case TCPOPT_SACK: 2069 if (olen > hlen) 2070 olen = hlen; 2071 if (olen >= TCPOLEN_SACKLEN) { 2072 for (i = 2; i + TCPOLEN_SACK <= olen; 2073 i += TCPOLEN_SACK) { 2074 memcpy(&sack, &opt[i], sizeof(sack)); 2075 pf_change_a(&sack.start, &th->th_sum, 2076 htonl(ntohl(sack.start) - 2077 dst->seqdiff), 0); 2078 pf_change_a(&sack.end, &th->th_sum, 2079 htonl(ntohl(sack.end) - 2080 dst->seqdiff), 0); 2081 memcpy(&opt[i], &sack, sizeof(sack)); 2082 } 2083 copyback = 1; 2084 } 2085 /* FALLTHROUGH */ 2086 default: 2087 if (olen < 2) 2088 olen = 2; 2089 hlen -= olen; 2090 opt += olen; 2091 } 2092 } 2093 2094 if (copyback) 2095 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts); 2096 return (copyback); 2097} 2098 2099static void 2100pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af, 2101 const struct pf_addr *saddr, const struct pf_addr *daddr, 2102 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, 2103 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag, 2104 u_int16_t rtag, struct ifnet *ifp) 2105{ 2106 struct pf_send_entry *pfse; 2107 struct mbuf *m; 2108 int len, tlen; 2109#ifdef INET 2110 struct ip *h = NULL; 2111#endif /* INET */ 2112#ifdef INET6 2113 struct ip6_hdr *h6 = NULL; 2114#endif /* INET6 */ 2115 struct tcphdr *th; 2116 char *opt; 2117 struct pf_mtag *pf_mtag; 2118 2119 len = 0; 2120 th = NULL; 2121 2122 /* maximum segment size tcp option */ 2123 tlen = sizeof(struct tcphdr); 2124 if (mss) 2125 tlen += 4; 2126 2127 switch (af) { 2128#ifdef INET 2129 case AF_INET: 2130 len = sizeof(struct ip) + tlen; 2131 break; 2132#endif /* INET */ 2133#ifdef INET6 2134 case AF_INET6: 2135 len = sizeof(struct ip6_hdr) + tlen; 2136 break; 2137#endif /* INET6 */ 2138 default: 2139 panic("%s: unsupported af %d", __func__, af); 2140 } 2141 2142 /* Allocate outgoing queue entry, mbuf and mbuf tag. 
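	 * M_NOWAIT is used throughout because this path can run from the
	 * packet processing context with pf locks held; on allocation
	 * failure the reply is simply not generated.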
*/ 2143 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); 2144 if (pfse == NULL) 2145 return; 2146 m = m_gethdr(M_NOWAIT, MT_HEADER); 2147 if (m == NULL) { 2148 free(pfse, M_PFTEMP); 2149 return; 2150 } 2151#ifdef MAC 2152 mac_netinet_firewall_send(m); 2153#endif 2154 if ((pf_mtag = pf_get_mtag(m)) == NULL) { 2155 free(pfse, M_PFTEMP); 2156 m_freem(m); 2157 return; 2158 } 2159 if (tag) 2160 m->m_flags |= M_SKIP_FIREWALL; 2161 pf_mtag->tag = rtag; 2162 2163 if (r != NULL && r->rtableid >= 0) 2164 M_SETFIB(m, r->rtableid); 2165 2166#ifdef ALTQ 2167 if (r != NULL && r->qid) { 2168 pf_mtag->qid = r->qid; 2169 2170 /* add hints for ecn */ 2171 pf_mtag->hdr = mtod(m, struct ip *); 2172 } 2173#endif /* ALTQ */ 2174 m->m_data += max_linkhdr; 2175 m->m_pkthdr.len = m->m_len = len; 2176 m->m_pkthdr.rcvif = NULL; 2177 bzero(m->m_data, len); 2178 switch (af) { 2179#ifdef INET 2180 case AF_INET: 2181 h = mtod(m, struct ip *); 2182 2183 /* IP header fields included in the TCP checksum */ 2184 h->ip_p = IPPROTO_TCP; 2185 h->ip_len = htons(tlen); 2186 h->ip_src.s_addr = saddr->v4.s_addr; 2187 h->ip_dst.s_addr = daddr->v4.s_addr; 2188 2189 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); 2190 break; 2191#endif /* INET */ 2192#ifdef INET6 2193 case AF_INET6: 2194 h6 = mtod(m, struct ip6_hdr *); 2195 2196 /* IP header fields included in the TCP checksum */ 2197 h6->ip6_nxt = IPPROTO_TCP; 2198 h6->ip6_plen = htons(tlen); 2199 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); 2200 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); 2201 2202 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); 2203 break; 2204#endif /* INET6 */ 2205 } 2206 2207 /* TCP header */ 2208 th->th_sport = sport; 2209 th->th_dport = dport; 2210 th->th_seq = htonl(seq); 2211 th->th_ack = htonl(ack); 2212 th->th_off = tlen >> 2; 2213 th->th_flags = flags; 2214 th->th_win = htons(win); 2215 2216 if (mss) { 2217 opt = (char *)(th + 1); 2218 opt[0] = TCPOPT_MAXSEG; 2219 opt[1] = 4; 2220 HTONS(mss); 2221 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2); 2222 } 2223 2224 switch (af) { 2225#ifdef INET 2226 case AF_INET: 2227 /* TCP checksum */ 2228 th->th_sum = in_cksum(m, len); 2229 2230 /* Finish the IP header */ 2231 h->ip_v = 4; 2232 h->ip_hl = sizeof(*h) >> 2; 2233 h->ip_tos = IPTOS_LOWDELAY; 2234 h->ip_off = V_path_mtu_discovery ? IP_DF : 0; 2235 h->ip_len = len; 2236 h->ip_ttl = ttl ? ttl : V_ip_defttl; 2237 h->ip_sum = 0; 2238 2239 pfse->pfse_type = PFSE_IP; 2240 break; 2241#endif /* INET */ 2242#ifdef INET6 2243 case AF_INET6: 2244 /* TCP checksum */ 2245 th->th_sum = in6_cksum(m, IPPROTO_TCP, 2246 sizeof(struct ip6_hdr), tlen); 2247 2248 h6->ip6_vfc |= IPV6_VERSION; 2249 h6->ip6_hlim = IPV6_DEFHLIM; 2250 2251 pfse->pfse_type = PFSE_IP6; 2252 break; 2253#endif /* INET6 */ 2254 } 2255 pfse->pfse_m = m; 2256 pf_send(pfse); 2257} 2258 2259static void 2260pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af, 2261 struct pf_rule *r) 2262{ 2263 struct pf_send_entry *pfse; 2264 struct mbuf *m0; 2265 struct pf_mtag *pf_mtag; 2266 2267 /* Allocate outgoing queue entry, mbuf and mbuf tag. 
*/ 2268 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); 2269 if (pfse == NULL) 2270 return; 2271 2272 if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) { 2273 free(pfse, M_PFTEMP); 2274 return; 2275 } 2276 2277 if ((pf_mtag = pf_get_mtag(m0)) == NULL) { 2278 free(pfse, M_PFTEMP); 2279 return; 2280 } 2281 /* XXX: revisit */ 2282 m0->m_flags |= M_SKIP_FIREWALL; 2283 2284 if (r->rtableid >= 0) 2285 M_SETFIB(m0, r->rtableid); 2286 2287#ifdef ALTQ 2288 if (r->qid) { 2289 pf_mtag->qid = r->qid; 2290 /* add hints for ecn */ 2291 pf_mtag->hdr = mtod(m0, struct ip *); 2292 } 2293#endif /* ALTQ */ 2294 2295 switch (af) { 2296#ifdef INET 2297 case AF_INET: 2298 { 2299 struct ip *ip; 2300 2301 /* icmp_error() expects host byte ordering */ 2302 ip = mtod(m0, struct ip *); 2303 NTOHS(ip->ip_len); 2304 NTOHS(ip->ip_off); 2305 2306 pfse->pfse_type = PFSE_ICMP; 2307 break; 2308 } 2309#endif /* INET */ 2310#ifdef INET6 2311 case AF_INET6: 2312 pfse->pfse_type = PFSE_ICMP6; 2313 break; 2314#endif /* INET6 */ 2315 } 2316 pfse->pfse_m = m0; 2317 pfse->pfse_icmp_type = type; 2318 pfse->pfse_icmp_code = code; 2319 pf_send(pfse); 2320} 2321 2322/* 2323 * Return 1 if the addresses a and b match (with mask m), otherwise return 0. 2324 * If n is 0, they match if they are equal. If n is != 0, they match if they 2325 * are different. 2326 */ 2327int 2328pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, 2329 struct pf_addr *b, sa_family_t af) 2330{ 2331 int match = 0; 2332 2333 switch (af) { 2334#ifdef INET 2335 case AF_INET: 2336 if ((a->addr32[0] & m->addr32[0]) == 2337 (b->addr32[0] & m->addr32[0])) 2338 match++; 2339 break; 2340#endif /* INET */ 2341#ifdef INET6 2342 case AF_INET6: 2343 if (((a->addr32[0] & m->addr32[0]) == 2344 (b->addr32[0] & m->addr32[0])) && 2345 ((a->addr32[1] & m->addr32[1]) == 2346 (b->addr32[1] & m->addr32[1])) && 2347 ((a->addr32[2] & m->addr32[2]) == 2348 (b->addr32[2] & m->addr32[2])) && 2349 ((a->addr32[3] & m->addr32[3]) == 2350 (b->addr32[3] & m->addr32[3]))) 2351 match++; 2352 break; 2353#endif /* INET6 */ 2354 } 2355 if (match) { 2356 if (n) 2357 return (0); 2358 else 2359 return (1); 2360 } else { 2361 if (n) 2362 return (1); 2363 else 2364 return (0); 2365 } 2366} 2367 2368/* 2369 * Return 1 if b <= a <= e, otherwise return 0. 
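 * All words are converted with ntohl() before comparison, most
 * significant word first for AF_INET6, so the check is numerically
 * correct regardless of host byte order; e.g. 10.0.0.17 falls within
 * the range 10.0.0.2 - 10.0.0.200.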
2370 */
2371int
2372pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
2373    struct pf_addr *a, sa_family_t af)
2374{
2375	switch (af) {
2376#ifdef INET
2377	case AF_INET:
2378		if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
2379		    (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
2380			return (0);
2381		break;
2382#endif /* INET */
2383#ifdef INET6
2384	case AF_INET6: {
2385		int	i;
2386
2387		/* check a >= b */
2388		for (i = 0; i < 4; ++i)
2389			if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
2390				break;
2391			else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
2392				return (0);
2393		/* check a <= e */
2394		for (i = 0; i < 4; ++i)
2395			if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
2396				break;
2397			else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
2398				return (0);
2399		break;
2400	}
2401#endif /* INET6 */
2402	}
2403	return (1);
2404}
2405
2406static int
2407pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
2408{
2409	switch (op) {
2410	case PF_OP_IRG:
2411		return ((p > a1) && (p < a2));
2412	case PF_OP_XRG:
2413		return ((p < a1) || (p > a2));
2414	case PF_OP_RRG:
2415		return ((p >= a1) && (p <= a2));
2416	case PF_OP_EQ:
2417		return (p == a1);
2418	case PF_OP_NE:
2419		return (p != a1);
2420	case PF_OP_LT:
2421		return (p < a1);
2422	case PF_OP_LE:
2423		return (p <= a1);
2424	case PF_OP_GT:
2425		return (p > a1);
2426	case PF_OP_GE:
2427		return (p >= a1);
2428	}
2429	return (0); /* never reached */
2430}
2431
2432int
2433pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
2434{
2435	NTOHS(a1);
2436	NTOHS(a2);
2437	NTOHS(p);
2438	return (pf_match(op, a1, a2, p));
2439}
2440
2441static int
2442pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
2443{
2444	if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2445		return (0);
2446	return (pf_match(op, a1, a2, u));
2447}
2448
2449static int
2450pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
2451{
2452	if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
2453		return (0);
2454	return (pf_match(op, a1, a2, g));
2455}
2456
2457int
2458pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag)
2459{
2460	if (*tag == -1)
2461		*tag = mtag;
2462
2463	return ((!r->match_tag_not && r->match_tag == *tag) ||
2464	    (r->match_tag_not && r->match_tag != *tag));
2465}
2466
2467int
2468pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
2469{
2470
2471	KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
2472
2473	if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
2474		return (ENOMEM);
2475
2476	pd->pf_mtag->tag = tag;
2477
2478	return (0);
2479}
2480
2481#define	PF_ANCHOR_STACKSIZE	32
2482struct pf_anchor_stackframe {
2483	struct pf_ruleset	*rs;
2484	struct pf_rule		*r;	/* XXX: + match bit */
2485	struct pf_anchor	*child;
2486};
2487
2488/*
2489 * XXX: We rely on malloc(9) returning pointer aligned addresses.
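 * Since such pointers have their low bit(s) clear, bit 0 of the
 * stackframe's rule pointer can carry the "match" flag; it is masked
 * off by PF_ANCHOR_RULE() before the pointer is dereferenced.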
2490 */ 2491#define PF_ANCHORSTACK_MATCH 0x00000001 2492#define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH) 2493 2494#define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH) 2495#define PF_ANCHOR_RULE(f) (struct pf_rule *) \ 2496 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK) 2497#define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \ 2498 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \ 2499} while (0) 2500 2501void 2502pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth, 2503 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a, 2504 int *match) 2505{ 2506 struct pf_anchor_stackframe *f; 2507 2508 PF_RULES_RASSERT(); 2509 2510 if (match) 2511 *match = 0; 2512 if (*depth >= PF_ANCHOR_STACKSIZE) { 2513 printf("%s: anchor stack overflow on %s\n", 2514 __func__, (*r)->anchor->name); 2515 *r = TAILQ_NEXT(*r, entries); 2516 return; 2517 } else if (*depth == 0 && a != NULL) 2518 *a = *r; 2519 f = stack + (*depth)++; 2520 f->rs = *rs; 2521 f->r = *r; 2522 if ((*r)->anchor_wildcard) { 2523 struct pf_anchor_node *parent = &(*r)->anchor->children; 2524 2525 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) { 2526 *r = NULL; 2527 return; 2528 } 2529 *rs = &f->child->ruleset; 2530 } else { 2531 f->child = NULL; 2532 *rs = &(*r)->anchor->ruleset; 2533 } 2534 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2535} 2536 2537int 2538pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth, 2539 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a, 2540 int *match) 2541{ 2542 struct pf_anchor_stackframe *f; 2543 struct pf_rule *fr; 2544 int quick = 0; 2545 2546 PF_RULES_RASSERT(); 2547 2548 do { 2549 if (*depth <= 0) 2550 break; 2551 f = stack + *depth - 1; 2552 fr = PF_ANCHOR_RULE(f); 2553 if (f->child != NULL) { 2554 struct pf_anchor_node *parent; 2555 2556 /* 2557 * This block traverses through 2558 * a wildcard anchor. 2559 */ 2560 parent = &fr->anchor->children; 2561 if (match != NULL && *match) { 2562 /* 2563 * If any of "*" matched, then 2564 * "foo/ *" matched, mark frame 2565 * appropriately. 
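				 * The mark stays on the frame while the
				 * remaining children are walked and is
				 * honoured, via fr->quick, when the frame
				 * is finally popped.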
2566 */ 2567 PF_ANCHOR_SET_MATCH(f); 2568 *match = 0; 2569 } 2570 f->child = RB_NEXT(pf_anchor_node, parent, f->child); 2571 if (f->child != NULL) { 2572 *rs = &f->child->ruleset; 2573 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2574 if (*r == NULL) 2575 continue; 2576 else 2577 break; 2578 } 2579 } 2580 (*depth)--; 2581 if (*depth == 0 && a != NULL) 2582 *a = NULL; 2583 *rs = f->rs; 2584 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match)) 2585 quick = fr->quick; 2586 *r = TAILQ_NEXT(fr, entries); 2587 } while (*r == NULL); 2588 2589 return (quick); 2590} 2591 2592#ifdef INET6 2593void 2594pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, 2595 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) 2596{ 2597 switch (af) { 2598#ifdef INET 2599 case AF_INET: 2600 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2601 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2602 break; 2603#endif /* INET */ 2604 case AF_INET6: 2605 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2606 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2607 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | 2608 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); 2609 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | 2610 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); 2611 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | 2612 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); 2613 break; 2614 } 2615} 2616 2617void 2618pf_addr_inc(struct pf_addr *addr, sa_family_t af) 2619{ 2620 switch (af) { 2621#ifdef INET 2622 case AF_INET: 2623 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); 2624 break; 2625#endif /* INET */ 2626 case AF_INET6: 2627 if (addr->addr32[3] == 0xffffffff) { 2628 addr->addr32[3] = 0; 2629 if (addr->addr32[2] == 0xffffffff) { 2630 addr->addr32[2] = 0; 2631 if (addr->addr32[1] == 0xffffffff) { 2632 addr->addr32[1] = 0; 2633 addr->addr32[0] = 2634 htonl(ntohl(addr->addr32[0]) + 1); 2635 } else 2636 addr->addr32[1] = 2637 htonl(ntohl(addr->addr32[1]) + 1); 2638 } else 2639 addr->addr32[2] = 2640 htonl(ntohl(addr->addr32[2]) + 1); 2641 } else 2642 addr->addr32[3] = 2643 htonl(ntohl(addr->addr32[3]) + 1); 2644 break; 2645 } 2646} 2647#endif /* INET6 */ 2648 2649int 2650pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m) 2651{ 2652 struct pf_addr *saddr, *daddr; 2653 u_int16_t sport, dport; 2654 struct inpcbinfo *pi; 2655 struct inpcb *inp; 2656 2657 pd->lookup.uid = UID_MAX; 2658 pd->lookup.gid = GID_MAX; 2659 2660 switch (pd->proto) { 2661 case IPPROTO_TCP: 2662 if (pd->hdr.tcp == NULL) 2663 return (-1); 2664 sport = pd->hdr.tcp->th_sport; 2665 dport = pd->hdr.tcp->th_dport; 2666 pi = &V_tcbinfo; 2667 break; 2668 case IPPROTO_UDP: 2669 if (pd->hdr.udp == NULL) 2670 return (-1); 2671 sport = pd->hdr.udp->uh_sport; 2672 dport = pd->hdr.udp->uh_dport; 2673 pi = &V_udbinfo; 2674 break; 2675 default: 2676 return (-1); 2677 } 2678 if (direction == PF_IN) { 2679 saddr = pd->src; 2680 daddr = pd->dst; 2681 } else { 2682 u_int16_t p; 2683 2684 p = sport; 2685 sport = dport; 2686 dport = p; 2687 saddr = pd->dst; 2688 daddr = pd->src; 2689 } 2690 switch (pd->af) { 2691#ifdef INET 2692 case AF_INET: 2693 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4, 2694 dport, INPLOOKUP_RLOCKPCB, NULL, m); 2695 if (inp == NULL) { 2696 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, 2697 daddr->v4, dport, INPLOOKUP_WILDCARD | 2698 INPLOOKUP_RLOCKPCB, NULL, m); 2699 if (inp == NULL) 2700 return (-1); 2701 } 2702 break; 
2703#endif /* INET */ 2704#ifdef INET6 2705 case AF_INET6: 2706 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6, 2707 dport, INPLOOKUP_RLOCKPCB, NULL, m); 2708 if (inp == NULL) { 2709 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, 2710 &daddr->v6, dport, INPLOOKUP_WILDCARD | 2711 INPLOOKUP_RLOCKPCB, NULL, m); 2712 if (inp == NULL) 2713 return (-1); 2714 } 2715 break; 2716#endif /* INET6 */ 2717 2718 default: 2719 return (-1); 2720 } 2721 INP_RLOCK_ASSERT(inp); 2722 pd->lookup.uid = inp->inp_cred->cr_uid; 2723 pd->lookup.gid = inp->inp_cred->cr_groups[0]; 2724 INP_RUNLOCK(inp); 2725 2726 return (1); 2727} 2728 2729static u_int8_t 2730pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 2731{ 2732 int hlen; 2733 u_int8_t hdr[60]; 2734 u_int8_t *opt, optlen; 2735 u_int8_t wscale = 0; 2736 2737 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 2738 if (hlen <= sizeof(struct tcphdr)) 2739 return (0); 2740 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 2741 return (0); 2742 opt = hdr + sizeof(struct tcphdr); 2743 hlen -= sizeof(struct tcphdr); 2744 while (hlen >= 3) { 2745 switch (*opt) { 2746 case TCPOPT_EOL: 2747 case TCPOPT_NOP: 2748 ++opt; 2749 --hlen; 2750 break; 2751 case TCPOPT_WINDOW: 2752 wscale = opt[2]; 2753 if (wscale > TCP_MAX_WINSHIFT) 2754 wscale = TCP_MAX_WINSHIFT; 2755 wscale |= PF_WSCALE_FLAG; 2756 /* FALLTHROUGH */ 2757 default: 2758 optlen = opt[1]; 2759 if (optlen < 2) 2760 optlen = 2; 2761 hlen -= optlen; 2762 opt += optlen; 2763 break; 2764 } 2765 } 2766 return (wscale); 2767} 2768 2769static u_int16_t 2770pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 2771{ 2772 int hlen; 2773 u_int8_t hdr[60]; 2774 u_int8_t *opt, optlen; 2775 u_int16_t mss = V_tcp_mssdflt; 2776 2777 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 2778 if (hlen <= sizeof(struct tcphdr)) 2779 return (0); 2780 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 2781 return (0); 2782 opt = hdr + sizeof(struct tcphdr); 2783 hlen -= sizeof(struct tcphdr); 2784 while (hlen >= TCPOLEN_MAXSEG) { 2785 switch (*opt) { 2786 case TCPOPT_EOL: 2787 case TCPOPT_NOP: 2788 ++opt; 2789 --hlen; 2790 break; 2791 case TCPOPT_MAXSEG: 2792 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 2793 NTOHS(mss); 2794 /* FALLTHROUGH */ 2795 default: 2796 optlen = opt[1]; 2797 if (optlen < 2) 2798 optlen = 2; 2799 hlen -= optlen; 2800 opt += optlen; 2801 break; 2802 } 2803 } 2804 return (mss); 2805} 2806 2807static u_int16_t 2808pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer) 2809{ 2810#ifdef INET 2811 struct sockaddr_in *dst; 2812 struct route ro; 2813#endif /* INET */ 2814#ifdef INET6 2815 struct sockaddr_in6 *dst6; 2816 struct route_in6 ro6; 2817#endif /* INET6 */ 2818 struct rtentry *rt = NULL; 2819 int hlen = 0; 2820 u_int16_t mss = V_tcp_mssdflt; 2821 2822 switch (af) { 2823#ifdef INET 2824 case AF_INET: 2825 hlen = sizeof(struct ip); 2826 bzero(&ro, sizeof(ro)); 2827 dst = (struct sockaddr_in *)&ro.ro_dst; 2828 dst->sin_family = AF_INET; 2829 dst->sin_len = sizeof(*dst); 2830 dst->sin_addr = addr->v4; 2831 in_rtalloc_ign(&ro, 0, rtableid); 2832 rt = ro.ro_rt; 2833 break; 2834#endif /* INET */ 2835#ifdef INET6 2836 case AF_INET6: 2837 hlen = sizeof(struct ip6_hdr); 2838 bzero(&ro6, sizeof(ro6)); 2839 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 2840 dst6->sin6_family = AF_INET6; 2841 dst6->sin6_len = sizeof(*dst6); 2842 dst6->sin6_addr = addr->v6; 2843 in6_rtalloc_ign(&ro6, 0, rtableid); 2844 rt = ro6.ro_rt; 2845 break; 2846#endif /* INET6 */ 2847 } 
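	/*
	 * Illustrative numbers: over a 1500-byte MTU route this yields
	 * 1500 - 20 (IP) - 20 (TCP) = 1460 for AF_INET and
	 * 1500 - 40 - 20 = 1440 for AF_INET6.  The result is then raised
	 * to at least V_tcp_mssdflt, capped at the caller's offer, and
	 * floored at 64 octets.
	 */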
2848 2849 if (rt && rt->rt_ifp) { 2850 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr); 2851 mss = max(V_tcp_mssdflt, mss); 2852 RTFREE(rt); 2853 } 2854 mss = min(mss, offer); 2855 mss = max(mss, 64); /* sanity - at least max opt space */ 2856 return (mss); 2857} 2858 2859static void 2860pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr) 2861{ 2862 struct pf_rule *r = s->rule.ptr; 2863 struct pf_src_node *sn = NULL; 2864 2865 s->rt_kif = NULL; 2866 if (!r->rt || r->rt == PF_FASTROUTE) 2867 return; 2868 switch (s->key[PF_SK_WIRE]->af) { 2869#ifdef INET 2870 case AF_INET: 2871 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn); 2872 s->rt_kif = r->rpool.cur->kif; 2873 break; 2874#endif /* INET */ 2875#ifdef INET6 2876 case AF_INET6: 2877 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn); 2878 s->rt_kif = r->rpool.cur->kif; 2879 break; 2880#endif /* INET6 */ 2881 } 2882} 2883 2884static u_int32_t 2885pf_tcp_iss(struct pf_pdesc *pd) 2886{ 2887 MD5_CTX ctx; 2888 u_int32_t digest[4]; 2889 2890 if (V_pf_tcp_secret_init == 0) { 2891 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret)); 2892 MD5Init(&V_pf_tcp_secret_ctx); 2893 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret, 2894 sizeof(V_pf_tcp_secret)); 2895 V_pf_tcp_secret_init = 1; 2896 } 2897 2898 ctx = V_pf_tcp_secret_ctx; 2899 2900 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); 2901 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); 2902 if (pd->af == AF_INET6) { 2903 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); 2904 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); 2905 } else { 2906 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); 2907 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr)); 2908 } 2909 MD5Final((u_char *)digest, &ctx); 2910 V_pf_tcp_iss_off += 4096; 2911#define ISN_RANDOM_INCREMENT (4096 - 1) 2912 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) + 2913 V_pf_tcp_iss_off); 2914#undef ISN_RANDOM_INCREMENT 2915} 2916 2917static int 2918pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, 2919 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd, 2920 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp) 2921{ 2922 struct pf_rule *nr = NULL; 2923 struct pf_addr * const saddr = pd->src; 2924 struct pf_addr * const daddr = pd->dst; 2925 sa_family_t af = pd->af; 2926 struct pf_rule *r, *a = NULL; 2927 struct pf_ruleset *ruleset = NULL; 2928 struct pf_src_node *nsn = NULL; 2929 struct tcphdr *th = pd->hdr.tcp; 2930 struct pf_state_key *sk = NULL, *nk = NULL; 2931 u_short reason; 2932 int rewrite = 0, hdrlen = 0; 2933 int tag = -1, rtableid = -1; 2934 int asd = 0; 2935 int match = 0; 2936 int state_icmp = 0; 2937 u_int16_t sport = 0, dport = 0; 2938 u_int16_t bproto_sum = 0, bip_sum = 0; 2939 u_int8_t icmptype = 0, icmpcode = 0; 2940 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; 2941 2942 PF_RULES_RASSERT(); 2943 2944 if (inp != NULL) { 2945 INP_LOCK_ASSERT(inp); 2946 pd->lookup.uid = inp->inp_cred->cr_uid; 2947 pd->lookup.gid = inp->inp_cred->cr_groups[0]; 2948 pd->lookup.done = 1; 2949 } 2950 2951 switch (pd->proto) { 2952 case IPPROTO_TCP: 2953 sport = th->th_sport; 2954 dport = th->th_dport; 2955 hdrlen = sizeof(*th); 2956 break; 2957 case IPPROTO_UDP: 2958 sport = pd->hdr.udp->uh_sport; 2959 dport = pd->hdr.udp->uh_dport; 2960 hdrlen = sizeof(*pd->hdr.udp); 2961 break; 2962#ifdef INET 2963 case IPPROTO_ICMP: 2964 if (pd->af != AF_INET) 2965 break; 
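		/*
		 * ICMP queries carry no ports; the echo/query id takes
		 * their place for state and rule matching.
		 */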
2966 sport = dport = pd->hdr.icmp->icmp_id; 2967 hdrlen = sizeof(*pd->hdr.icmp); 2968 icmptype = pd->hdr.icmp->icmp_type; 2969 icmpcode = pd->hdr.icmp->icmp_code; 2970 2971 if (icmptype == ICMP_UNREACH || 2972 icmptype == ICMP_SOURCEQUENCH || 2973 icmptype == ICMP_REDIRECT || 2974 icmptype == ICMP_TIMXCEED || 2975 icmptype == ICMP_PARAMPROB) 2976 state_icmp++; 2977 break; 2978#endif /* INET */ 2979#ifdef INET6 2980 case IPPROTO_ICMPV6: 2981 if (af != AF_INET6) 2982 break; 2983 sport = dport = pd->hdr.icmp6->icmp6_id; 2984 hdrlen = sizeof(*pd->hdr.icmp6); 2985 icmptype = pd->hdr.icmp6->icmp6_type; 2986 icmpcode = pd->hdr.icmp6->icmp6_code; 2987 2988 if (icmptype == ICMP6_DST_UNREACH || 2989 icmptype == ICMP6_PACKET_TOO_BIG || 2990 icmptype == ICMP6_TIME_EXCEEDED || 2991 icmptype == ICMP6_PARAM_PROB) 2992 state_icmp++; 2993 break; 2994#endif /* INET6 */ 2995 default: 2996 sport = dport = hdrlen = 0; 2997 break; 2998 } 2999 3000 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3001 3002 /* check packet for BINAT/NAT/RDR */ 3003 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk, 3004 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) { 3005 KASSERT(sk != NULL, ("%s: null sk", __func__)); 3006 KASSERT(nk != NULL, ("%s: null nk", __func__)); 3007 3008 if (pd->ip_sum) 3009 bip_sum = *pd->ip_sum; 3010 3011 switch (pd->proto) { 3012 case IPPROTO_TCP: 3013 bproto_sum = th->th_sum; 3014 pd->proto_sum = &th->th_sum; 3015 3016 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3017 nk->port[pd->sidx] != sport) { 3018 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 3019 &th->th_sum, &nk->addr[pd->sidx], 3020 nk->port[pd->sidx], 0, af); 3021 pd->sport = &th->th_sport; 3022 sport = th->th_sport; 3023 } 3024 3025 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3026 nk->port[pd->didx] != dport) { 3027 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 3028 &th->th_sum, &nk->addr[pd->didx], 3029 nk->port[pd->didx], 0, af); 3030 dport = th->th_dport; 3031 pd->dport = &th->th_dport; 3032 } 3033 rewrite++; 3034 break; 3035 case IPPROTO_UDP: 3036 bproto_sum = pd->hdr.udp->uh_sum; 3037 pd->proto_sum = &pd->hdr.udp->uh_sum; 3038 3039 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3040 nk->port[pd->sidx] != sport) { 3041 pf_change_ap(saddr, &pd->hdr.udp->uh_sport, 3042 pd->ip_sum, &pd->hdr.udp->uh_sum, 3043 &nk->addr[pd->sidx], 3044 nk->port[pd->sidx], 1, af); 3045 sport = pd->hdr.udp->uh_sport; 3046 pd->sport = &pd->hdr.udp->uh_sport; 3047 } 3048 3049 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3050 nk->port[pd->didx] != dport) { 3051 pf_change_ap(daddr, &pd->hdr.udp->uh_dport, 3052 pd->ip_sum, &pd->hdr.udp->uh_sum, 3053 &nk->addr[pd->didx], 3054 nk->port[pd->didx], 1, af); 3055 dport = pd->hdr.udp->uh_dport; 3056 pd->dport = &pd->hdr.udp->uh_dport; 3057 } 3058 rewrite++; 3059 break; 3060#ifdef INET 3061 case IPPROTO_ICMP: 3062 nk->port[0] = nk->port[1]; 3063 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) 3064 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3065 nk->addr[pd->sidx].v4.s_addr, 0); 3066 3067 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) 3068 pf_change_a(&daddr->v4.s_addr, pd->ip_sum, 3069 nk->addr[pd->didx].v4.s_addr, 0); 3070 3071 if (nk->port[1] != pd->hdr.icmp->icmp_id) { 3072 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup( 3073 pd->hdr.icmp->icmp_cksum, sport, 3074 nk->port[1], 0); 3075 pd->hdr.icmp->icmp_id = nk->port[1]; 3076 pd->sport = &pd->hdr.icmp->icmp_id; 3077 } 3078 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 3079 break; 3080#endif /* INET */ 
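		/*
		 * For ICMPv6 the checksum covers a pseudo-header that
		 * includes the addresses, so pf_change_a6() below folds
		 * the address rewrite directly into icmp6_cksum.
		 */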
3081#ifdef INET6
3082		case IPPROTO_ICMPV6:
3083			nk->port[0] = nk->port[1];
3084			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
3085				pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum,
3086				    &nk->addr[pd->sidx], 0);
3087
3088			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
3089				pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum,
3090				    &nk->addr[pd->didx], 0);
3091			rewrite++;
3092			break;
3093#endif /* INET6 */
3094		default:
3095			switch (af) {
3096#ifdef INET
3097			case AF_INET:
3098				if (PF_ANEQ(saddr,
3099				    &nk->addr[pd->sidx], AF_INET))
3100					pf_change_a(&saddr->v4.s_addr,
3101					    pd->ip_sum,
3102					    nk->addr[pd->sidx].v4.s_addr, 0);
3103
3104				if (PF_ANEQ(daddr,
3105				    &nk->addr[pd->didx], AF_INET))
3106					pf_change_a(&daddr->v4.s_addr,
3107					    pd->ip_sum,
3108					    nk->addr[pd->didx].v4.s_addr, 0);
3109				break;
3110#endif /* INET */
3111#ifdef INET6
3112			case AF_INET6:
3113				if (PF_ANEQ(saddr,
3114				    &nk->addr[pd->sidx], AF_INET6))
3115					PF_ACPY(saddr, &nk->addr[pd->sidx], af);
3116
3117				if (PF_ANEQ(daddr,
3118				    &nk->addr[pd->didx], AF_INET6))
3119					PF_ACPY(daddr, &nk->addr[pd->didx], af);
3120				break;
3121#endif /* INET6 */
3122			}
3123			break;
3124		}
3125		if (nr->natpass)
3126			r = NULL;
3127		pd->nat_rule = nr;
3128	}
3129
3130	while (r != NULL) {
3131		r->evaluations++;
3132		if (pfi_kif_match(r->kif, kif) == r->ifnot)
3133			r = r->skip[PF_SKIP_IFP].ptr;
3134		else if (r->direction && r->direction != direction)
3135			r = r->skip[PF_SKIP_DIR].ptr;
3136		else if (r->af && r->af != af)
3137			r = r->skip[PF_SKIP_AF].ptr;
3138		else if (r->proto && r->proto != pd->proto)
3139			r = r->skip[PF_SKIP_PROTO].ptr;
3140		else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
3141		    r->src.neg, kif, M_GETFIB(m)))
3142			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
3143		/* tcp/udp only. port_op always 0 in other cases */
3144		else if (r->src.port_op && !pf_match_port(r->src.port_op,
3145		    r->src.port[0], r->src.port[1], sport))
3146			r = r->skip[PF_SKIP_SRC_PORT].ptr;
3147		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
3148		    r->dst.neg, NULL, M_GETFIB(m)))
3149			r = r->skip[PF_SKIP_DST_ADDR].ptr;
3150		/* tcp/udp only. port_op always 0 in other cases */
3151		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
3152		    r->dst.port[0], r->dst.port[1], dport))
3153			r = r->skip[PF_SKIP_DST_PORT].ptr;
3154		/* icmp only. type always 0 in other cases */
3155		else if (r->type && r->type != icmptype + 1)
3156			r = TAILQ_NEXT(r, entries);
3157		/* icmp only. code always 0 in other cases */
3158		else if (r->code && r->code != icmpcode + 1)
3159			r = TAILQ_NEXT(r, entries);
3160		else if (r->tos && !(r->tos == pd->tos))
3161			r = TAILQ_NEXT(r, entries);
3162		else if (r->rule_flag & PFRULE_FRAGMENT)
3163			r = TAILQ_NEXT(r, entries);
3164		else if (pd->proto == IPPROTO_TCP &&
3165		    (r->flagset & th->th_flags) != r->flags)
3166			r = TAILQ_NEXT(r, entries);
3167		/* tcp/udp only. uid.op always 0 in other cases */
3168		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
3169		    pf_socket_lookup(direction, pd, m), 1)) &&
3170		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
3171		    pd->lookup.uid))
3172			r = TAILQ_NEXT(r, entries);
3173		/* tcp/udp only. gid.op always 0 in other cases */
3174		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
3175		    pf_socket_lookup(direction, pd, m), 1)) &&
3176		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
3177		    pd->lookup.gid))
3178			r = TAILQ_NEXT(r, entries);
3179		else if (r->prob &&
3180		    r->prob <= arc4random())
3181			r = TAILQ_NEXT(r, entries);
3182		else if (r->match_tag && !pf_match_tag(m, r, &tag,
3183		    pd->pf_mtag ?
pd->pf_mtag->tag : 0)) 3184 r = TAILQ_NEXT(r, entries); 3185 else if (r->os_fingerprint != PF_OSFP_ANY && 3186 (pd->proto != IPPROTO_TCP || !pf_osfp_match( 3187 pf_osfp_fingerprint(pd, m, off, th), 3188 r->os_fingerprint))) 3189 r = TAILQ_NEXT(r, entries); 3190 else { 3191 if (r->tag) 3192 tag = r->tag; 3193 if (r->rtableid >= 0) 3194 rtableid = r->rtableid; 3195 if (r->anchor == NULL) { 3196 match = 1; 3197 *rm = r; 3198 *am = a; 3199 *rsm = ruleset; 3200 if ((*rm)->quick) 3201 break; 3202 r = TAILQ_NEXT(r, entries); 3203 } else 3204 pf_step_into_anchor(anchor_stack, &asd, 3205 &ruleset, PF_RULESET_FILTER, &r, &a, 3206 &match); 3207 } 3208 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd, 3209 &ruleset, PF_RULESET_FILTER, &r, &a, &match)) 3210 break; 3211 } 3212 r = *rm; 3213 a = *am; 3214 ruleset = *rsm; 3215 3216 REASON_SET(&reason, PFRES_MATCH); 3217 3218 if (r->log || (nr != NULL && nr->log)) { 3219 if (rewrite) 3220 m_copyback(m, off, hdrlen, pd->hdr.any); 3221 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a, 3222 ruleset, pd, 1); 3223 } 3224 3225 if ((r->action == PF_DROP) && 3226 ((r->rule_flag & PFRULE_RETURNRST) || 3227 (r->rule_flag & PFRULE_RETURNICMP) || 3228 (r->rule_flag & PFRULE_RETURN))) { 3229 /* undo NAT changes, if they have taken place */ 3230 if (nr != NULL) { 3231 PF_ACPY(saddr, &sk->addr[pd->sidx], af); 3232 PF_ACPY(daddr, &sk->addr[pd->didx], af); 3233 if (pd->sport) 3234 *pd->sport = sk->port[pd->sidx]; 3235 if (pd->dport) 3236 *pd->dport = sk->port[pd->didx]; 3237 if (pd->proto_sum) 3238 *pd->proto_sum = bproto_sum; 3239 if (pd->ip_sum) 3240 *pd->ip_sum = bip_sum; 3241 m_copyback(m, off, hdrlen, pd->hdr.any); 3242 } 3243 if (pd->proto == IPPROTO_TCP && 3244 ((r->rule_flag & PFRULE_RETURNRST) || 3245 (r->rule_flag & PFRULE_RETURN)) && 3246 !(th->th_flags & TH_RST)) { 3247 u_int32_t ack = ntohl(th->th_seq) + pd->p_len; 3248 int len = 0; 3249#ifdef INET 3250 struct ip *h4; 3251#endif 3252#ifdef INET6 3253 struct ip6_hdr *h6; 3254#endif 3255 3256 switch (af) { 3257#ifdef INET 3258 case AF_INET: 3259 h4 = mtod(m, struct ip *); 3260 len = ntohs(h4->ip_len) - off; 3261 break; 3262#endif 3263#ifdef INET6 3264 case AF_INET6: 3265 h6 = mtod(m, struct ip6_hdr *); 3266 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6)); 3267 break; 3268#endif 3269 } 3270 3271 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) 3272 REASON_SET(&reason, PFRES_PROTCKSUM); 3273 else { 3274 if (th->th_flags & TH_SYN) 3275 ack++; 3276 if (th->th_flags & TH_FIN) 3277 ack++; 3278 pf_send_tcp(m, r, af, pd->dst, 3279 pd->src, th->th_dport, th->th_sport, 3280 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, 3281 r->return_ttl, 1, 0, kif->pfik_ifp); 3282 } 3283 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && 3284 r->return_icmp) 3285 pf_send_icmp(m, r->return_icmp >> 8, 3286 r->return_icmp & 255, af, r); 3287 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && 3288 r->return_icmp6) 3289 pf_send_icmp(m, r->return_icmp6 >> 8, 3290 r->return_icmp6 & 255, af, r); 3291 } 3292 3293 if (r->action == PF_DROP) 3294 goto cleanup; 3295 3296 if (tag > 0 && pf_tag_packet(m, pd, tag)) { 3297 REASON_SET(&reason, PFRES_MEMORY); 3298 goto cleanup; 3299 } 3300 if (rtableid >= 0) 3301 M_SETFIB(m, rtableid); 3302 3303 if (!state_icmp && (r->keep_state || nr != NULL || 3304 (pd->flags & PFDESC_TCP_NORM))) { 3305 int action; 3306 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off, 3307 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum, 3308 hdrlen); 3309 if (action != 
PF_PASS)
3310			return (action);
3311	} else {
3312		if (sk != NULL)
3313			uma_zfree(V_pf_state_key_z, sk);
3314		if (nk != NULL)
3315			uma_zfree(V_pf_state_key_z, nk);
3316	}
3317
3318	/* copy back packet headers if we performed NAT operations */
3319	if (rewrite)
3320		m_copyback(m, off, hdrlen, pd->hdr.any);
3321
3322	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
3323	    direction == PF_OUT &&
3324	    pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m))
3325		/*
3326		 * We want the state created, but we don't
3327		 * want to send this in case a partner
3328		 * firewall has to know about it to allow
3329		 * replies through it.
3330		 */
3331		return (PF_DEFER);
3332
3333	return (PF_PASS);
3334
3335cleanup:
3336	if (sk != NULL)
3337		uma_zfree(V_pf_state_key_z, sk);
3338	if (nk != NULL)
3339		uma_zfree(V_pf_state_key_z, nk);
3340	return (PF_DROP);
3341}
3342
3343static int
3344pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a,
3345    struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk,
3346    struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
3347    u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm,
3348    int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
3349{
3350	struct pf_state		*s = NULL;
3351	struct pf_src_node	*sn = NULL;
3352	struct tcphdr		*th = pd->hdr.tcp;
3353	u_int16_t		 mss = V_tcp_mssdflt;
3354	u_short			 reason;
3355
3356	/* check maximums */
3357	if (r->max_states && (r->states_cur >= r->max_states)) {
3358		V_pf_status.lcounters[LCNT_STATES]++;
3359		REASON_SET(&reason, PFRES_MAXSTATES);
3360		return (PF_DROP);
3361	}
3362	/* src node for filter rule */
3363	if ((r->rule_flag & PFRULE_SRCTRACK ||
3364	    r->rpool.opts & PF_POOL_STICKYADDR) &&
3365	    pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
3366		REASON_SET(&reason, PFRES_SRCLIMIT);
3367		goto csfailed;
3368	}
3369	/* src node for translation rule */
3370	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
3371	    pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
3372		REASON_SET(&reason, PFRES_SRCLIMIT);
3373		goto csfailed;
3374	}
3375	s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO);
3376	if (s == NULL) {
3377		REASON_SET(&reason, PFRES_MEMORY);
3378		goto csfailed;
3379	}
3380	s->rule.ptr = r;
3381	s->nat_rule.ptr = nr;
3382	s->anchor.ptr = a;
3383	STATE_INC_COUNTERS(s);
3384	if (r->allow_opts)
3385		s->state_flags |= PFSTATE_ALLOWOPTS;
3386	if (r->rule_flag & PFRULE_STATESLOPPY)
3387		s->state_flags |= PFSTATE_SLOPPY;
3388	s->log = r->log & PF_LOG_ALL;
3389	s->sync_state = PFSYNC_S_NONE;
3390	if (nr != NULL)
3391		s->log |= nr->log & PF_LOG_ALL;
3392	switch (pd->proto) {
3393	case IPPROTO_TCP:
3394		s->src.seqlo = ntohl(th->th_seq);
3395		s->src.seqhi = s->src.seqlo + pd->p_len + 1;
3396		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
3397		    r->keep_state == PF_STATE_MODULATE) {
3398			/* Generate sequence number modulator */
3399			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
3400			    0)
3401				s->src.seqdiff = 1;
3402			pf_change_a(&th->th_seq, &th->th_sum,
3403			    htonl(s->src.seqlo + s->src.seqdiff), 0);
3404			*rewrite = 1;
3405		} else
3406			s->src.seqdiff = 0;
3407		if (th->th_flags & TH_SYN) {
3408			s->src.seqhi++;
3409			s->src.wscale = pf_get_wscale(m, off,
3410			    th->th_off, pd->af);
3411		}
3412		s->src.max_win = MAX(ntohs(th->th_win), 1);
3413		if (s->src.wscale & PF_WSCALE_MASK) {
3414			/* Remove scale factor from initial window */
3415			int win = s->src.max_win;
3416			win += 1 << (s->src.wscale & PF_WSCALE_MASK);
3417			s->src.max_win = (win - 1) >>
3418			    (s->src.wscale & PF_WSCALE_MASK);
3419		}
3420		if (th->th_flags & TH_FIN)
3421			s->src.seqhi++;
3422		s->dst.seqhi = 1;
3423		s->dst.max_win = 1;
3424		s->src.state = TCPS_SYN_SENT;
3425		s->dst.state = TCPS_CLOSED;
3426		s->timeout = PFTM_TCP_FIRST_PACKET;
3427		break;
3428	case IPPROTO_UDP:
3429		s->src.state = PFUDPS_SINGLE;
3430		s->dst.state = PFUDPS_NO_TRAFFIC;
3431		s->timeout = PFTM_UDP_FIRST_PACKET;
3432		break;
3433	case IPPROTO_ICMP:
3434#ifdef INET6
3435	case IPPROTO_ICMPV6:
3436#endif
3437		s->timeout = PFTM_ICMP_FIRST_PACKET;
3438		break;
3439	default:
3440		s->src.state = PFOTHERS_SINGLE;
3441		s->dst.state = PFOTHERS_NO_TRAFFIC;
3442		s->timeout = PFTM_OTHER_FIRST_PACKET;
3443	}
3444
3445	s->creation = time_uptime;
3446	s->expire = time_uptime;
3447
3448	if (sn != NULL) {
3449		s->src_node = sn;
3450		s->src_node->states++;
3451	}
3452	if (nsn != NULL) {
3453		/* XXX We only modify one side for now. */
3454		PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
3455		s->nat_src_node = nsn;
3456		s->nat_src_node->states++;
3457	}
3458	if (pd->proto == IPPROTO_TCP) {
3459		if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
3460		    off, pd, th, &s->src, &s->dst)) {
3461			REASON_SET(&reason, PFRES_MEMORY);
3462			pf_src_tree_remove_state(s);
3463			STATE_DEC_COUNTERS(s);
3464			uma_zfree(V_pf_state_z, s);
3465			return (PF_DROP);
3466		}
3467		if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
3468		    pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
3469		    &s->src, &s->dst, rewrite)) {
3470			/* This really shouldn't happen!!! */
3471			DPFPRINTF(PF_DEBUG_URGENT,
3472			    ("pf_normalize_tcp_stateful failed on first pkt"));
3473			pf_normalize_tcp_cleanup(s);
3474			pf_src_tree_remove_state(s);
3475			STATE_DEC_COUNTERS(s);
3476			uma_zfree(V_pf_state_z, s);
3477			return (PF_DROP);
3478		}
3479	}
3480	s->direction = pd->dir;
3481
3482	/*
3483	 * sk/nk could already have been set up by pf_get_translation().
3484	 */
3485	if (nr == NULL) {
3486		KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
3487		    __func__, nr, sk, nk));
3488		sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
3489		if (sk == NULL)
3490			goto csfailed;
3491		nk = sk;
3492	} else
3493		KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
3494		    __func__, nr, sk, nk));
3495
3496	/* Swap sk/nk for PF_OUT. */
3497	if (pf_state_insert(BOUND_IFACE(r, kif),
3498	    (pd->dir == PF_IN) ? sk : nk,
3499	    (pd->dir == PF_IN) ?
nk : sk, s)) { 3500 if (pd->proto == IPPROTO_TCP) 3501 pf_normalize_tcp_cleanup(s); 3502 REASON_SET(&reason, PFRES_STATEINS); 3503 pf_src_tree_remove_state(s); 3504 STATE_DEC_COUNTERS(s); 3505 uma_zfree(V_pf_state_z, s); 3506 return (PF_DROP); 3507 } else 3508 *sm = s; 3509 3510 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 3511 if (tag > 0) 3512 s->tag = tag; 3513 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 3514 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 3515 s->src.state = PF_TCPS_PROXY_SRC; 3516 /* undo NAT changes, if they have taken place */ 3517 if (nr != NULL) { 3518 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 3519 if (pd->dir == PF_OUT) 3520 skt = s->key[PF_SK_STACK]; 3521 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 3522 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 3523 if (pd->sport) 3524 *pd->sport = skt->port[pd->sidx]; 3525 if (pd->dport) 3526 *pd->dport = skt->port[pd->didx]; 3527 if (pd->proto_sum) 3528 *pd->proto_sum = bproto_sum; 3529 if (pd->ip_sum) 3530 *pd->ip_sum = bip_sum; 3531 m_copyback(m, off, hdrlen, pd->hdr.any); 3532 } 3533 s->src.seqhi = htonl(arc4random()); 3534 /* Find mss option */ 3535 int rtid = M_GETFIB(m); 3536 mss = pf_get_mss(m, off, th->th_off, pd->af); 3537 mss = pf_calc_mss(pd->src, pd->af, rtid, mss); 3538 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss); 3539 s->src.mss = mss; 3540 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport, 3541 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 3542 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL); 3543 REASON_SET(&reason, PFRES_SYNPROXY); 3544 return (PF_SYNPROXY_DROP); 3545 } 3546 3547 return (PF_PASS); 3548 3549csfailed: 3550 if (sk != NULL) 3551 uma_zfree(V_pf_state_key_z, sk); 3552 if (nk != NULL) 3553 uma_zfree(V_pf_state_key_z, nk); 3554 3555 if (sn != NULL && sn->states == 0 && sn->expire == 0) 3556 pf_remove_src_node(sn); 3557 3558 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) 3559 pf_remove_src_node(nsn); 3560 3561 return (PF_DROP); 3562} 3563 3564static int 3565pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 3566 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am, 3567 struct pf_ruleset **rsm) 3568{ 3569 struct pf_rule *r, *a = NULL; 3570 struct pf_ruleset *ruleset = NULL; 3571 sa_family_t af = pd->af; 3572 u_short reason; 3573 int tag = -1; 3574 int asd = 0; 3575 int match = 0; 3576 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; 3577 3578 PF_RULES_RASSERT(); 3579 3580 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3581 while (r != NULL) { 3582 r->evaluations++; 3583 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3584 r = r->skip[PF_SKIP_IFP].ptr; 3585 else if (r->direction && r->direction != direction) 3586 r = r->skip[PF_SKIP_DIR].ptr; 3587 else if (r->af && r->af != af) 3588 r = r->skip[PF_SKIP_AF].ptr; 3589 else if (r->proto && r->proto != pd->proto) 3590 r = r->skip[PF_SKIP_PROTO].ptr; 3591 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 3592 r->src.neg, kif, M_GETFIB(m))) 3593 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3594 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 3595 r->dst.neg, NULL, M_GETFIB(m))) 3596 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3597 else if (r->tos && !(r->tos == pd->tos)) 3598 r = TAILQ_NEXT(r, entries); 3599 else if (r->os_fingerprint != PF_OSFP_ANY) 3600 r = TAILQ_NEXT(r, entries); 3601 else if (pd->proto == IPPROTO_UDP && 3602 (r->src.port_op || r->dst.port_op)) 3603 r = TAILQ_NEXT(r, entries); 3604 else if (pd->proto == 
IPPROTO_TCP && 3605 (r->src.port_op || r->dst.port_op || r->flagset)) 3606 r = TAILQ_NEXT(r, entries); 3607 else if ((pd->proto == IPPROTO_ICMP || 3608 pd->proto == IPPROTO_ICMPV6) && 3609 (r->type || r->code)) 3610 r = TAILQ_NEXT(r, entries); 3611 else if (r->prob && r->prob <= 3612 (arc4random() % (UINT_MAX - 1) + 1)) 3613 r = TAILQ_NEXT(r, entries); 3614 else if (r->match_tag && !pf_match_tag(m, r, &tag, 3615 pd->pf_mtag ? pd->pf_mtag->tag : 0)) 3616 r = TAILQ_NEXT(r, entries); 3617 else { 3618 if (r->anchor == NULL) { 3619 match = 1; 3620 *rm = r; 3621 *am = a; 3622 *rsm = ruleset; 3623 if ((*rm)->quick) 3624 break; 3625 r = TAILQ_NEXT(r, entries); 3626 } else 3627 pf_step_into_anchor(anchor_stack, &asd, 3628 &ruleset, PF_RULESET_FILTER, &r, &a, 3629 &match); 3630 } 3631 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd, 3632 &ruleset, PF_RULESET_FILTER, &r, &a, &match)) 3633 break; 3634 } 3635 r = *rm; 3636 a = *am; 3637 ruleset = *rsm; 3638 3639 REASON_SET(&reason, PFRES_MATCH); 3640 3641 if (r->log) 3642 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd, 3643 1); 3644 3645 if (r->action != PF_PASS) 3646 return (PF_DROP); 3647 3648 if (tag > 0 && pf_tag_packet(m, pd, tag)) { 3649 REASON_SET(&reason, PFRES_MEMORY); 3650 return (PF_DROP); 3651 } 3652 3653 return (PF_PASS); 3654} 3655 3656static int 3657pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 3658 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 3659 struct pf_pdesc *pd, u_short *reason, int *copyback) 3660{ 3661 struct tcphdr *th = pd->hdr.tcp; 3662 u_int16_t win = ntohs(th->th_win); 3663 u_int32_t ack, end, seq, orig_seq; 3664 u_int8_t sws, dws; 3665 int ackskew; 3666 3667 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 3668 sws = src->wscale & PF_WSCALE_MASK; 3669 dws = dst->wscale & PF_WSCALE_MASK; 3670 } else 3671 sws = dws = 0; 3672 3673 /* 3674 * Sequence tracking algorithm from Guido van Rooij's paper: 3675 * http://www.madison-gurkha.com/publications/tcp_filtering/ 3676 * tcp_filtering.ps 3677 */ 3678 3679 orig_seq = seq = ntohl(th->th_seq); 3680 if (src->seqlo == 0) { 3681 /* First packet from this end. 
Set its state */
3682
3683		if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
3684		    src->scrub == NULL) {
3685			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
3686				REASON_SET(reason, PFRES_MEMORY);
3687				return (PF_DROP);
3688			}
3689		}
3690
3691		/* Deferred generation of sequence number modulator */
3692		if (dst->seqdiff && !src->seqdiff) {
3693			/* use random iss for the TCP server */
3694			while ((src->seqdiff = arc4random() - seq) == 0)
3695				;
3696			ack = ntohl(th->th_ack) - dst->seqdiff;
3697			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3698			    src->seqdiff), 0);
3699			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3700			*copyback = 1;
3701		} else {
3702			ack = ntohl(th->th_ack);
3703		}
3704
3705		end = seq + pd->p_len;
3706		if (th->th_flags & TH_SYN) {
3707			end++;
3708			if (dst->wscale & PF_WSCALE_FLAG) {
3709				src->wscale = pf_get_wscale(m, off, th->th_off,
3710				    pd->af);
3711				if (src->wscale & PF_WSCALE_FLAG) {
3712					/* Remove scale factor from initial
3713					 * window */
3714					sws = src->wscale & PF_WSCALE_MASK;
3715					win = ((u_int32_t)win + (1 << sws) - 1)
3716					    >> sws;
3717					dws = dst->wscale & PF_WSCALE_MASK;
3718				} else {
3719					/* fixup other window */
3720					dst->max_win <<= dst->wscale &
3721					    PF_WSCALE_MASK;
3722					/* in case of a retrans SYN|ACK */
3723					dst->wscale = 0;
3724				}
3725			}
3726		}
3727		if (th->th_flags & TH_FIN)
3728			end++;
3729
3730		src->seqlo = seq;
3731		if (src->state < TCPS_SYN_SENT)
3732			src->state = TCPS_SYN_SENT;
3733
3734		/*
3735		 * May need to slide the window (seqhi may have been set by
3736		 * the crappy stack check or if we picked up the connection
3737		 * after establishment)
3738		 */
3739		if (src->seqhi == 1 ||
3740		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
3741			src->seqhi = end + MAX(1, dst->max_win << dws);
3742		if (win > src->max_win)
3743			src->max_win = win;
3744
3745	} else {
3746		ack = ntohl(th->th_ack) - dst->seqdiff;
3747		if (src->seqdiff) {
3748			/* Modulate sequence numbers */
3749			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq +
3750			    src->seqdiff), 0);
3751			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0);
3752			*copyback = 1;
3753		}
3754		end = seq + pd->p_len;
3755		if (th->th_flags & TH_SYN)
3756			end++;
3757		if (th->th_flags & TH_FIN)
3758			end++;
3759	}
3760
3761	if ((th->th_flags & TH_ACK) == 0) {
3762		/* Let it pass through the ack skew check */
3763		ack = dst->seqlo;
3764	} else if ((ack == 0 &&
3765	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
3766	    /* broken tcp stacks do not set ack */
3767	    (dst->state < TCPS_SYN_SENT)) {
3768		/*
3769		 * Many stacks (ours included) will set the ACK number in a
3770		 * FIN|ACK if the SYN times out -- no sequence to ACK.
3771		 */
3772		ack = dst->seqlo;
3773	}
3774
3775	if (seq == end) {
3776		/* Ease sequencing restrictions on packets with no data */
3777		seq = src->seqlo;
3778		end = seq;
3779	}
3780
3781	ackskew = dst->seqlo - ack;
3782
3783
3784	/*
3785	 * Need to demodulate the sequence numbers in any TCP SACK options
3786	 * (Selective ACK). We could optionally validate the SACK values
3787	 * against the current ACK window, either forwards or backwards, but
3788	 * I'm not confident that SACK has been implemented properly
3789	 * everywhere. It wouldn't surprise me if several stacks accidentally
3790	 * SACK too far backwards of previously ACKed data. There really aren't
3791	 * any security implications of bad SACKing unless the target stack
3792	 * doesn't validate the option length correctly.
Someone trying to 3793 * spoof into a TCP connection won't bother blindly sending SACK 3794 * options anyway. 3795 */ 3796 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 3797 if (pf_modulate_sack(m, off, pd, th, dst)) 3798 *copyback = 1; 3799 } 3800 3801 3802#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 3803 if (SEQ_GEQ(src->seqhi, end) && 3804 /* Last octet inside other's window space */ 3805 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 3806 /* Retrans: not more than one window back */ 3807 (ackskew >= -MAXACKWINDOW) && 3808 /* Acking not more than one reassembled fragment backwards */ 3809 (ackskew <= (MAXACKWINDOW << sws)) && 3810 /* Acking not more than one window forward */ 3811 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 3812 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 3813 (pd->flags & PFDESC_IP_REAS) == 0)) { 3814 /* Require an exact/+1 sequence match on resets when possible */ 3815 3816 if (dst->scrub || src->scrub) { 3817 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 3818 *state, src, dst, copyback)) 3819 return (PF_DROP); 3820 } 3821 3822 /* update max window */ 3823 if (src->max_win < win) 3824 src->max_win = win; 3825 /* synchronize sequencing */ 3826 if (SEQ_GT(end, src->seqlo)) 3827 src->seqlo = end; 3828 /* slide the window of what the other end can send */ 3829 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 3830 dst->seqhi = ack + MAX((win << sws), 1); 3831 3832 3833 /* update states */ 3834 if (th->th_flags & TH_SYN) 3835 if (src->state < TCPS_SYN_SENT) 3836 src->state = TCPS_SYN_SENT; 3837 if (th->th_flags & TH_FIN) 3838 if (src->state < TCPS_CLOSING) 3839 src->state = TCPS_CLOSING; 3840 if (th->th_flags & TH_ACK) { 3841 if (dst->state == TCPS_SYN_SENT) { 3842 dst->state = TCPS_ESTABLISHED; 3843 if (src->state == TCPS_ESTABLISHED && 3844 (*state)->src_node != NULL && 3845 pf_src_connlimit(state)) { 3846 REASON_SET(reason, PFRES_SRCLIMIT); 3847 return (PF_DROP); 3848 } 3849 } else if (dst->state == TCPS_CLOSING) 3850 dst->state = TCPS_FIN_WAIT_2; 3851 } 3852 if (th->th_flags & TH_RST) 3853 src->state = dst->state = TCPS_TIME_WAIT; 3854 3855 /* update expire time */ 3856 (*state)->expire = time_uptime; 3857 if (src->state >= TCPS_FIN_WAIT_2 && 3858 dst->state >= TCPS_FIN_WAIT_2) 3859 (*state)->timeout = PFTM_TCP_CLOSED; 3860 else if (src->state >= TCPS_CLOSING && 3861 dst->state >= TCPS_CLOSING) 3862 (*state)->timeout = PFTM_TCP_FIN_WAIT; 3863 else if (src->state < TCPS_ESTABLISHED || 3864 dst->state < TCPS_ESTABLISHED) 3865 (*state)->timeout = PFTM_TCP_OPENING; 3866 else if (src->state >= TCPS_CLOSING || 3867 dst->state >= TCPS_CLOSING) 3868 (*state)->timeout = PFTM_TCP_CLOSING; 3869 else 3870 (*state)->timeout = PFTM_TCP_ESTABLISHED; 3871 3872 /* Fall through to PASS packet */ 3873 3874 } else if ((dst->state < TCPS_SYN_SENT || 3875 dst->state >= TCPS_FIN_WAIT_2 || 3876 src->state >= TCPS_FIN_WAIT_2) && 3877 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 3878 /* Within a window forward of the originating packet */ 3879 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 3880 /* Within a window backward of the originating packet */ 3881 3882 /* 3883 * This currently handles three situations: 3884 * 1) Stupid stacks will shotgun SYNs before their peer 3885 * replies. 3886 * 2) When PF catches an already established stream (the 3887 * firewall rebooted, the state table was flushed, routes 3888 * changed...) 
3889 * 3) Packets get funky immediately after the connection 3890 * closes (this should catch Solaris spurious ACK|FINs 3891 * that web servers like to spew after a close) 3892 * 3893 * This must be a little more careful than the above code 3894 * since packet floods will also be caught here. We don't 3895 * update the TTL here to mitigate the damage of a packet 3896 * flood and so the same code can handle awkward establishment 3897 * and a loosened connection close. 3898 * In the establishment case, a correct peer response will 3899 * validate the connection, go through the normal state code 3900 * and keep updating the state TTL. 3901 */ 3902 3903 if (V_pf_status.debug >= PF_DEBUG_MISC) { 3904 printf("pf: loose state match: "); 3905 pf_print_state(*state); 3906 pf_print_flags(th->th_flags); 3907 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 3908 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, 3909 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0], 3910 (unsigned long long)(*state)->packets[1], 3911 pd->dir == PF_IN ? "in" : "out", 3912 pd->dir == (*state)->direction ? "fwd" : "rev"); 3913 } 3914 3915 if (dst->scrub || src->scrub) { 3916 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 3917 *state, src, dst, copyback)) 3918 return (PF_DROP); 3919 } 3920 3921 /* update max window */ 3922 if (src->max_win < win) 3923 src->max_win = win; 3924 /* synchronize sequencing */ 3925 if (SEQ_GT(end, src->seqlo)) 3926 src->seqlo = end; 3927 /* slide the window of what the other end can send */ 3928 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 3929 dst->seqhi = ack + MAX((win << sws), 1); 3930 3931 /* 3932 * Cannot set dst->seqhi here since this could be a shotgunned 3933 * SYN and not an already established connection. 3934 */ 3935 3936 if (th->th_flags & TH_FIN) 3937 if (src->state < TCPS_CLOSING) 3938 src->state = TCPS_CLOSING; 3939 if (th->th_flags & TH_RST) 3940 src->state = dst->state = TCPS_TIME_WAIT; 3941 3942 /* Fall through to PASS packet */ 3943 3944 } else { 3945 if ((*state)->dst.state == TCPS_SYN_SENT && 3946 (*state)->src.state == TCPS_SYN_SENT) { 3947 /* Send RST for state mismatches during handshake */ 3948 if (!(th->th_flags & TH_RST)) 3949 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, 3950 pd->dst, pd->src, th->th_dport, 3951 th->th_sport, ntohl(th->th_ack), 0, 3952 TH_RST, 0, 0, 3953 (*state)->rule.ptr->return_ttl, 1, 0, 3954 kif->pfik_ifp); 3955 src->seqlo = 0; 3956 src->seqhi = 1; 3957 src->max_win = 1; 3958 } else if (V_pf_status.debug >= PF_DEBUG_MISC) { 3959 printf("pf: BAD state: "); 3960 pf_print_state(*state); 3961 pf_print_flags(th->th_flags); 3962 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 3963 "pkts=%llu:%llu dir=%s,%s\n", 3964 seq, orig_seq, ack, pd->p_len, ackskew, 3965 (unsigned long long)(*state)->packets[0], 3966 (unsigned long long)(*state)->packets[1], 3967 pd->dir == PF_IN ? "in" : "out", 3968 pd->dir == (*state)->direction ? "fwd" : "rev"); 3969 printf("pf: State failure on: %c %c %c %c | %c %c\n", 3970 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 3971 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? 3972 ' ': '2', 3973 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 3974 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', 3975 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 3976 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' 
' :'6'); 3977 } 3978 REASON_SET(reason, PFRES_BADSTATE); 3979 return (PF_DROP); 3980 } 3981 3982 return (PF_PASS); 3983} 3984 3985static int 3986pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst, 3987 struct pf_state **state, struct pf_pdesc *pd, u_short *reason) 3988{ 3989 struct tcphdr *th = pd->hdr.tcp; 3990 3991 if (th->th_flags & TH_SYN) 3992 if (src->state < TCPS_SYN_SENT) 3993 src->state = TCPS_SYN_SENT; 3994 if (th->th_flags & TH_FIN) 3995 if (src->state < TCPS_CLOSING) 3996 src->state = TCPS_CLOSING; 3997 if (th->th_flags & TH_ACK) { 3998 if (dst->state == TCPS_SYN_SENT) { 3999 dst->state = TCPS_ESTABLISHED; 4000 if (src->state == TCPS_ESTABLISHED && 4001 (*state)->src_node != NULL && 4002 pf_src_connlimit(state)) { 4003 REASON_SET(reason, PFRES_SRCLIMIT); 4004 return (PF_DROP); 4005 } 4006 } else if (dst->state == TCPS_CLOSING) { 4007 dst->state = TCPS_FIN_WAIT_2; 4008 } else if (src->state == TCPS_SYN_SENT && 4009 dst->state < TCPS_SYN_SENT) { 4010 /* 4011 * Handle a special sloppy case where we only see one 4012 * half of the connection. If there is an ACK after 4013 * the initial SYN without ever seeing a packet from 4014 * the destination, set the connection to established. 4015 */ 4016 dst->state = src->state = TCPS_ESTABLISHED; 4017 if ((*state)->src_node != NULL && 4018 pf_src_connlimit(state)) { 4019 REASON_SET(reason, PFRES_SRCLIMIT); 4020 return (PF_DROP); 4021 } 4022 } else if (src->state == TCPS_CLOSING && 4023 dst->state == TCPS_ESTABLISHED && 4024 dst->seqlo == 0) { 4025 /* 4026 * Handle the closing of half connections where we 4027 * don't see the full bidirectional FIN/ACK+ACK 4028 * handshake. 4029 */ 4030 dst->state = TCPS_CLOSING; 4031 } 4032 } 4033 if (th->th_flags & TH_RST) 4034 src->state = dst->state = TCPS_TIME_WAIT; 4035 4036 /* update expire time */ 4037 (*state)->expire = time_uptime; 4038 if (src->state >= TCPS_FIN_WAIT_2 && 4039 dst->state >= TCPS_FIN_WAIT_2) 4040 (*state)->timeout = PFTM_TCP_CLOSED; 4041 else if (src->state >= TCPS_CLOSING && 4042 dst->state >= TCPS_CLOSING) 4043 (*state)->timeout = PFTM_TCP_FIN_WAIT; 4044 else if (src->state < TCPS_ESTABLISHED || 4045 dst->state < TCPS_ESTABLISHED) 4046 (*state)->timeout = PFTM_TCP_OPENING; 4047 else if (src->state >= TCPS_CLOSING || 4048 dst->state >= TCPS_CLOSING) 4049 (*state)->timeout = PFTM_TCP_CLOSING; 4050 else 4051 (*state)->timeout = PFTM_TCP_ESTABLISHED; 4052 4053 return (PF_PASS); 4054} 4055 4056static int 4057pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4058 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4059 u_short *reason) 4060{ 4061 struct pf_state_key_cmp key; 4062 struct tcphdr *th = pd->hdr.tcp; 4063 int copyback = 0; 4064 struct pf_state_peer *src, *dst; 4065 struct pf_state_key *sk; 4066 4067 bzero(&key, sizeof(key)); 4068 key.af = pd->af; 4069 key.proto = IPPROTO_TCP; 4070 if (direction == PF_IN) { /* wire side, straight */ 4071 PF_ACPY(&key.addr[0], pd->src, key.af); 4072 PF_ACPY(&key.addr[1], pd->dst, key.af); 4073 key.port[0] = th->th_sport; 4074 key.port[1] = th->th_dport; 4075 } else { /* stack side, reverse */ 4076 PF_ACPY(&key.addr[1], pd->src, key.af); 4077 PF_ACPY(&key.addr[0], pd->dst, key.af); 4078 key.port[1] = th->th_sport; 4079 key.port[0] = th->th_dport; 4080 } 4081 4082 STATE_LOOKUP(kif, &key, direction, *state, pd); 4083 4084 if (direction == (*state)->direction) { 4085 src = &(*state)->src; 4086 dst = &(*state)->dst; 4087 } else { 4088 src = &(*state)->dst; 4089 dst = &(*state)->src; 4090 } 4091
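	/*
	 * From here on "src" is the peer that sent this packet and "dst"
	 * the peer it is addressed to, no matter which side originally
	 * created the state; a reply therefore updates (*state)->dst
	 * through the "src" pointer.
	 */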
4092 sk = (*state)->key[pd->didx]; 4093 4094 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4095 if (direction != (*state)->direction) { 4096 REASON_SET(reason, PFRES_SYNPROXY); 4097 return (PF_SYNPROXY_DROP); 4098 } 4099 if (th->th_flags & TH_SYN) { 4100 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4101 REASON_SET(reason, PFRES_SYNPROXY); 4102 return (PF_DROP); 4103 } 4104 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst, 4105 pd->src, th->th_dport, th->th_sport, 4106 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4107 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL); 4108 REASON_SET(reason, PFRES_SYNPROXY); 4109 return (PF_SYNPROXY_DROP); 4110 } else if (!(th->th_flags & TH_ACK) || 4111 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4112 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4113 REASON_SET(reason, PFRES_SYNPROXY); 4114 return (PF_DROP); 4115 } else if ((*state)->src_node != NULL && 4116 pf_src_connlimit(state)) { 4117 REASON_SET(reason, PFRES_SRCLIMIT); 4118 return (PF_DROP); 4119 } else 4120 (*state)->src.state = PF_TCPS_PROXY_DST; 4121 } 4122 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4123 if (direction == (*state)->direction) { 4124 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4125 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4126 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4127 REASON_SET(reason, PFRES_SYNPROXY); 4128 return (PF_DROP); 4129 } 4130 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4131 if ((*state)->dst.seqhi == 1) 4132 (*state)->dst.seqhi = htonl(arc4random()); 4133 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, 4134 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4135 sk->port[pd->sidx], sk->port[pd->didx], 4136 (*state)->dst.seqhi, 0, TH_SYN, 0, 4137 (*state)->src.mss, 0, 0, (*state)->tag, NULL); 4138 REASON_SET(reason, PFRES_SYNPROXY); 4139 return (PF_SYNPROXY_DROP); 4140 } else if (((th->th_flags & (TH_SYN|TH_ACK)) != 4141 (TH_SYN|TH_ACK)) || 4142 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { 4143 REASON_SET(reason, PFRES_SYNPROXY); 4144 return (PF_DROP); 4145 } else { 4146 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); 4147 (*state)->dst.seqlo = ntohl(th->th_seq); 4148 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst, 4149 pd->src, th->th_dport, th->th_sport, 4150 ntohl(th->th_ack), ntohl(th->th_seq) + 1, 4151 TH_ACK, (*state)->src.max_win, 0, 0, 0, 4152 (*state)->tag, NULL); 4153 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, 4154 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4155 sk->port[pd->sidx], sk->port[pd->didx], 4156 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, 4157 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL); 4158 (*state)->src.seqdiff = (*state)->dst.seqhi - 4159 (*state)->src.seqlo; 4160 (*state)->dst.seqdiff = (*state)->src.seqhi - 4161 (*state)->dst.seqlo; 4162 (*state)->src.seqhi = (*state)->src.seqlo + 4163 (*state)->dst.max_win; 4164 (*state)->dst.seqhi = (*state)->dst.seqlo + 4165 (*state)->src.max_win; 4166 (*state)->src.wscale = (*state)->dst.wscale = 0; 4167 (*state)->src.state = (*state)->dst.state = 4168 TCPS_ESTABLISHED; 4169 REASON_SET(reason, PFRES_SYNPROXY); 4170 return (PF_SYNPROXY_DROP); 4171 } 4172 } 4173 4174 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && 4175 dst->state >= TCPS_FIN_WAIT_2 && 4176 src->state >= TCPS_FIN_WAIT_2) { 4177 if (V_pf_status.debug >= PF_DEBUG_MISC) { 4178 printf("pf: state reuse "); 4179 pf_print_state(*state); 4180 pf_print_flags(th->th_flags); 4181 printf("\n"); 4182 } 4183 /* XXX make sure it's the same direction ?? 
*/ 4184 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 4185 pf_unlink_state(*state, PF_ENTER_LOCKED); 4186 *state = NULL; 4187 return (PF_DROP); 4188 } 4189 4190 if ((*state)->state_flags & PFSTATE_SLOPPY) { 4191 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP) 4192 return (PF_DROP); 4193 } else { 4194 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason, 4195 &copyback) == PF_DROP) 4196 return (PF_DROP); 4197 } 4198 4199 /* translate source/destination address, if necessary */ 4200 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4201 struct pf_state_key *nk = (*state)->key[pd->didx]; 4202 4203 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4204 nk->port[pd->sidx] != th->th_sport) 4205 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 4206 &th->th_sum, &nk->addr[pd->sidx], 4207 nk->port[pd->sidx], 0, pd->af); 4208 4209 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4210 nk->port[pd->didx] != th->th_dport) 4211 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 4212 &th->th_sum, &nk->addr[pd->didx], 4213 nk->port[pd->didx], 0, pd->af); 4214 copyback = 1; 4215 } 4216 4217 /* Copyback sequence modulation or stateful scrub changes if needed */ 4218 if (copyback) 4219 m_copyback(m, off, sizeof(*th), (caddr_t)th); 4220 4221 return (PF_PASS); 4222} 4223 4224static int 4225pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 4226 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 4227{ 4228 struct pf_state_peer *src, *dst; 4229 struct pf_state_key_cmp key; 4230 struct udphdr *uh = pd->hdr.udp; 4231 4232 bzero(&key, sizeof(key)); 4233 key.af = pd->af; 4234 key.proto = IPPROTO_UDP; 4235 if (direction == PF_IN) { /* wire side, straight */ 4236 PF_ACPY(&key.addr[0], pd->src, key.af); 4237 PF_ACPY(&key.addr[1], pd->dst, key.af); 4238 key.port[0] = uh->uh_sport; 4239 key.port[1] = uh->uh_dport; 4240 } else { /* stack side, reverse */ 4241 PF_ACPY(&key.addr[1], pd->src, key.af); 4242 PF_ACPY(&key.addr[0], pd->dst, key.af); 4243 key.port[1] = uh->uh_sport; 4244 key.port[0] = uh->uh_dport; 4245 } 4246 4247 STATE_LOOKUP(kif, &key, direction, *state, pd); 4248 4249 if (direction == (*state)->direction) { 4250 src = &(*state)->src; 4251 dst = &(*state)->dst; 4252 } else { 4253 src = &(*state)->dst; 4254 dst = &(*state)->src; 4255 } 4256 4257 /* update states */ 4258 if (src->state < PFUDPS_SINGLE) 4259 src->state = PFUDPS_SINGLE; 4260 if (dst->state == PFUDPS_SINGLE) 4261 dst->state = PFUDPS_MULTIPLE; 4262 4263 /* update expire time */ 4264 (*state)->expire = time_uptime; 4265 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 4266 (*state)->timeout = PFTM_UDP_MULTIPLE; 4267 else 4268 (*state)->timeout = PFTM_UDP_SINGLE; 4269 4270 /* translate source/destination address, if necessary */ 4271 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4272 struct pf_state_key *nk = (*state)->key[pd->didx]; 4273 4274 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4275 nk->port[pd->sidx] != uh->uh_sport) 4276 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 4277 &uh->uh_sum, &nk->addr[pd->sidx], 4278 nk->port[pd->sidx], 1, pd->af); 4279 4280 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4281 nk->port[pd->didx] != uh->uh_dport) 4282 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 4283 &uh->uh_sum, &nk->addr[pd->didx], 4284 nk->port[pd->didx], 1, pd->af); 4285 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 4286 } 4287 4288 return (PF_PASS); 4289} 4290 4291static int 4292pf_test_state_icmp(struct
pf_state **state, int direction, struct pfi_kif *kif, 4293 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) 4294{ 4295 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 4296 u_int16_t icmpid = 0, *icmpsum; 4297 u_int8_t icmptype; 4298 int state_icmp = 0; 4299 struct pf_state_key_cmp key; 4300 4301 bzero(&key, sizeof(key)); 4302 switch (pd->proto) { 4303#ifdef INET 4304 case IPPROTO_ICMP: 4305 icmptype = pd->hdr.icmp->icmp_type; 4306 icmpid = pd->hdr.icmp->icmp_id; 4307 icmpsum = &pd->hdr.icmp->icmp_cksum; 4308 4309 if (icmptype == ICMP_UNREACH || 4310 icmptype == ICMP_SOURCEQUENCH || 4311 icmptype == ICMP_REDIRECT || 4312 icmptype == ICMP_TIMXCEED || 4313 icmptype == ICMP_PARAMPROB) 4314 state_icmp++; 4315 break; 4316#endif /* INET */ 4317#ifdef INET6 4318 case IPPROTO_ICMPV6: 4319 icmptype = pd->hdr.icmp6->icmp6_type; 4320 icmpid = pd->hdr.icmp6->icmp6_id; 4321 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4322 4323 if (icmptype == ICMP6_DST_UNREACH || 4324 icmptype == ICMP6_PACKET_TOO_BIG || 4325 icmptype == ICMP6_TIME_EXCEEDED || 4326 icmptype == ICMP6_PARAM_PROB) 4327 state_icmp++; 4328 break; 4329#endif /* INET6 */ 4330 } 4331 4332 if (!state_icmp) { 4333 4334 /* 4335 * ICMP query/reply message not related to a TCP/UDP packet. 4336 * Search for an ICMP state. 4337 */ 4338 key.af = pd->af; 4339 key.proto = pd->proto; 4340 key.port[0] = key.port[1] = icmpid; 4341 if (direction == PF_IN) { /* wire side, straight */ 4342 PF_ACPY(&key.addr[0], pd->src, key.af); 4343 PF_ACPY(&key.addr[1], pd->dst, key.af); 4344 } else { /* stack side, reverse */ 4345 PF_ACPY(&key.addr[1], pd->src, key.af); 4346 PF_ACPY(&key.addr[0], pd->dst, key.af); 4347 } 4348 4349 STATE_LOOKUP(kif, &key, direction, *state, pd); 4350 4351 (*state)->expire = time_uptime; 4352 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 4353 4354 /* translate source/destination address, if necessary */ 4355 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4356 struct pf_state_key *nk = (*state)->key[pd->didx]; 4357 4358 switch (pd->af) { 4359#ifdef INET 4360 case AF_INET: 4361 if (PF_ANEQ(pd->src, 4362 &nk->addr[pd->sidx], AF_INET)) 4363 pf_change_a(&saddr->v4.s_addr, 4364 pd->ip_sum, 4365 nk->addr[pd->sidx].v4.s_addr, 0); 4366 4367 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 4368 AF_INET)) 4369 pf_change_a(&daddr->v4.s_addr, 4370 pd->ip_sum, 4371 nk->addr[pd->didx].v4.s_addr, 0); 4372 4373 if (nk->port[0] != 4374 pd->hdr.icmp->icmp_id) { 4375 pd->hdr.icmp->icmp_cksum = 4376 pf_cksum_fixup( 4377 pd->hdr.icmp->icmp_cksum, icmpid, 4378 nk->port[pd->sidx], 0); 4379 pd->hdr.icmp->icmp_id = 4380 nk->port[pd->sidx]; 4381 } 4382 4383 m_copyback(m, off, ICMP_MINLEN, 4384 (caddr_t )pd->hdr.icmp); 4385 break; 4386#endif /* INET */ 4387#ifdef INET6 4388 case AF_INET6: 4389 if (PF_ANEQ(pd->src, 4390 &nk->addr[pd->sidx], AF_INET6)) 4391 pf_change_a6(saddr, 4392 &pd->hdr.icmp6->icmp6_cksum, 4393 &nk->addr[pd->sidx], 0); 4394 4395 if (PF_ANEQ(pd->dst, 4396 &nk->addr[pd->didx], AF_INET6)) 4397 pf_change_a6(daddr, 4398 &pd->hdr.icmp6->icmp6_cksum, 4399 &nk->addr[pd->didx], 0); 4400 4401 m_copyback(m, off, sizeof(struct icmp6_hdr), 4402 (caddr_t )pd->hdr.icmp6); 4403 break; 4404#endif /* INET6 */ 4405 } 4406 } 4407 return (PF_PASS); 4408 4409 } else { 4410 /* 4411 * ICMP error message in response to a TCP/UDP packet. 4412 * Extract the inner TCP/UDP header and search for that state. 
4413 */ 4414 4415 struct pf_pdesc pd2; 4416 bzero(&pd2, sizeof pd2); 4417#ifdef INET 4418 struct ip h2; 4419#endif /* INET */ 4420#ifdef INET6 4421 struct ip6_hdr h2_6; 4422 int terminal = 0; 4423#endif /* INET6 */ 4424 int ipoff2 = 0; 4425 int off2 = 0; 4426 4427 pd2.af = pd->af; 4428 /* Payload packet is from the opposite direction. */ 4429 pd2.sidx = (direction == PF_IN) ? 1 : 0; 4430 pd2.didx = (direction == PF_IN) ? 0 : 1; 4431 switch (pd->af) { 4432#ifdef INET 4433 case AF_INET: 4434 /* offset of h2 in mbuf chain */ 4435 ipoff2 = off + ICMP_MINLEN; 4436 4437 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 4438 NULL, reason, pd2.af)) { 4439 DPFPRINTF(PF_DEBUG_MISC, 4440 ("pf: ICMP error message too short " 4441 "(ip)\n")); 4442 return (PF_DROP); 4443 } 4444 /* 4445 * ICMP error messages don't refer to non-first 4446 * fragments 4447 */ 4448 if (h2.ip_off & htons(IP_OFFMASK)) { 4449 REASON_SET(reason, PFRES_FRAG); 4450 return (PF_DROP); 4451 } 4452 4453 /* offset of protocol header that follows h2 */ 4454 off2 = ipoff2 + (h2.ip_hl << 2); 4455 4456 pd2.proto = h2.ip_p; 4457 pd2.src = (struct pf_addr *)&h2.ip_src; 4458 pd2.dst = (struct pf_addr *)&h2.ip_dst; 4459 pd2.ip_sum = &h2.ip_sum; 4460 break; 4461#endif /* INET */ 4462#ifdef INET6 4463 case AF_INET6: 4464 ipoff2 = off + sizeof(struct icmp6_hdr); 4465 4466 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 4467 NULL, reason, pd2.af)) { 4468 DPFPRINTF(PF_DEBUG_MISC, 4469 ("pf: ICMP error message too short " 4470 "(ip6)\n")); 4471 return (PF_DROP); 4472 } 4473 pd2.proto = h2_6.ip6_nxt; 4474 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 4475 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 4476 pd2.ip_sum = NULL; 4477 off2 = ipoff2 + sizeof(h2_6); 4478 do { 4479 switch (pd2.proto) { 4480 case IPPROTO_FRAGMENT: 4481 /* 4482 * ICMPv6 error messages for 4483 * non-first fragments 4484 */ 4485 REASON_SET(reason, PFRES_FRAG); 4486 return (PF_DROP); 4487 case IPPROTO_AH: 4488 case IPPROTO_HOPOPTS: 4489 case IPPROTO_ROUTING: 4490 case IPPROTO_DSTOPTS: { 4491 /* get next header and header length */ 4492 struct ip6_ext opt6; 4493 4494 if (!pf_pull_hdr(m, off2, &opt6, 4495 sizeof(opt6), NULL, reason, 4496 pd2.af)) { 4497 DPFPRINTF(PF_DEBUG_MISC, 4498 ("pf: ICMPv6 short opt\n")); 4499 return (PF_DROP); 4500 } 4501 if (pd2.proto == IPPROTO_AH) 4502 off2 += (opt6.ip6e_len + 2) * 4; 4503 else 4504 off2 += (opt6.ip6e_len + 1) * 8; 4505 pd2.proto = opt6.ip6e_nxt; 4506 /* goto the next header */ 4507 break; 4508 } 4509 default: 4510 terminal++; 4511 break; 4512 } 4513 } while (!terminal); 4514 break; 4515#endif /* INET6 */ 4516 } 4517 4518 switch (pd2.proto) { 4519 case IPPROTO_TCP: { 4520 struct tcphdr th; 4521 u_int32_t seq; 4522 struct pf_state_peer *src, *dst; 4523 u_int8_t dws; 4524 int copyback = 0; 4525 4526 /* 4527 * Only the first 8 bytes of the TCP header can be 4528 * expected. Don't access any TCP header fields after 4529 * th_seq, an ackskew test is not possible. 
4530 */ 4531 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 4532 pd2.af)) { 4533 DPFPRINTF(PF_DEBUG_MISC, 4534 ("pf: ICMP error message too short " 4535 "(tcp)\n")); 4536 return (PF_DROP); 4537 } 4538 4539 key.af = pd2.af; 4540 key.proto = IPPROTO_TCP; 4541 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4542 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4543 key.port[pd2.sidx] = th.th_sport; 4544 key.port[pd2.didx] = th.th_dport; 4545 4546 STATE_LOOKUP(kif, &key, direction, *state, pd); 4547 4548 if (direction == (*state)->direction) { 4549 src = &(*state)->dst; 4550 dst = &(*state)->src; 4551 } else { 4552 src = &(*state)->src; 4553 dst = &(*state)->dst; 4554 } 4555 4556 if (src->wscale && dst->wscale) 4557 dws = dst->wscale & PF_WSCALE_MASK; 4558 else 4559 dws = 0; 4560 4561 /* Demodulate sequence number */ 4562 seq = ntohl(th.th_seq) - src->seqdiff; 4563 if (src->seqdiff) { 4564 pf_change_a(&th.th_seq, icmpsum, 4565 htonl(seq), 0); 4566 copyback = 1; 4567 } 4568 4569 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 4570 (!SEQ_GEQ(src->seqhi, seq) || 4571 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 4572 if (V_pf_status.debug >= PF_DEBUG_MISC) { 4573 printf("pf: BAD ICMP %d:%d ", 4574 icmptype, pd->hdr.icmp->icmp_code); 4575 pf_print_host(pd->src, 0, pd->af); 4576 printf(" -> "); 4577 pf_print_host(pd->dst, 0, pd->af); 4578 printf(" state: "); 4579 pf_print_state(*state); 4580 printf(" seq=%u\n", seq); 4581 } 4582 REASON_SET(reason, PFRES_BADSTATE); 4583 return (PF_DROP); 4584 } else { 4585 if (V_pf_status.debug >= PF_DEBUG_MISC) { 4586 printf("pf: OK ICMP %d:%d ", 4587 icmptype, pd->hdr.icmp->icmp_code); 4588 pf_print_host(pd->src, 0, pd->af); 4589 printf(" -> "); 4590 pf_print_host(pd->dst, 0, pd->af); 4591 printf(" state: "); 4592 pf_print_state(*state); 4593 printf(" seq=%u\n", seq); 4594 } 4595 } 4596 4597 /* translate source/destination address, if necessary */ 4598 if ((*state)->key[PF_SK_WIRE] != 4599 (*state)->key[PF_SK_STACK]) { 4600 struct pf_state_key *nk = 4601 (*state)->key[pd->didx]; 4602 4603 if (PF_ANEQ(pd2.src, 4604 &nk->addr[pd2.sidx], pd2.af) || 4605 nk->port[pd2.sidx] != th.th_sport) 4606 pf_change_icmp(pd2.src, &th.th_sport, 4607 daddr, &nk->addr[pd2.sidx], 4608 nk->port[pd2.sidx], NULL, 4609 pd2.ip_sum, icmpsum, 4610 pd->ip_sum, 0, pd2.af); 4611 4612 if (PF_ANEQ(pd2.dst, 4613 &nk->addr[pd2.didx], pd2.af) || 4614 nk->port[pd2.didx] != th.th_dport) 4615 pf_change_icmp(pd2.dst, &th.th_dport, 4616 NULL, /* XXX Inbound NAT? 
*/ 4617 &nk->addr[pd2.didx], 4618 nk->port[pd2.didx], NULL, 4619 pd2.ip_sum, icmpsum, 4620 pd->ip_sum, 0, pd2.af); 4621 copyback = 1; 4622 } 4623 4624 if (copyback) { 4625 switch (pd2.af) { 4626#ifdef INET 4627 case AF_INET: 4628 m_copyback(m, off, ICMP_MINLEN, 4629 (caddr_t )pd->hdr.icmp); 4630 m_copyback(m, ipoff2, sizeof(h2), 4631 (caddr_t )&h2); 4632 break; 4633#endif /* INET */ 4634#ifdef INET6 4635 case AF_INET6: 4636 m_copyback(m, off, 4637 sizeof(struct icmp6_hdr), 4638 (caddr_t )pd->hdr.icmp6); 4639 m_copyback(m, ipoff2, sizeof(h2_6), 4640 (caddr_t )&h2_6); 4641 break; 4642#endif /* INET6 */ 4643 } 4644 m_copyback(m, off2, 8, (caddr_t)&th); 4645 } 4646 4647 return (PF_PASS); 4648 break; 4649 } 4650 case IPPROTO_UDP: { 4651 struct udphdr uh; 4652 4653 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 4654 NULL, reason, pd2.af)) { 4655 DPFPRINTF(PF_DEBUG_MISC, 4656 ("pf: ICMP error message too short " 4657 "(udp)\n")); 4658 return (PF_DROP); 4659 } 4660 4661 key.af = pd2.af; 4662 key.proto = IPPROTO_UDP; 4663 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4664 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4665 key.port[pd2.sidx] = uh.uh_sport; 4666 key.port[pd2.didx] = uh.uh_dport; 4667 4668 STATE_LOOKUP(kif, &key, direction, *state, pd); 4669 4670 /* translate source/destination address, if necessary */ 4671 if ((*state)->key[PF_SK_WIRE] != 4672 (*state)->key[PF_SK_STACK]) { 4673 struct pf_state_key *nk = 4674 (*state)->key[pd->didx]; 4675 4676 if (PF_ANEQ(pd2.src, 4677 &nk->addr[pd2.sidx], pd2.af) || 4678 nk->port[pd2.sidx] != uh.uh_sport) 4679 pf_change_icmp(pd2.src, &uh.uh_sport, 4680 daddr, &nk->addr[pd2.sidx], 4681 nk->port[pd2.sidx], &uh.uh_sum, 4682 pd2.ip_sum, icmpsum, 4683 pd->ip_sum, 1, pd2.af); 4684 4685 if (PF_ANEQ(pd2.dst, 4686 &nk->addr[pd2.didx], pd2.af) || 4687 nk->port[pd2.didx] != uh.uh_dport) 4688 pf_change_icmp(pd2.dst, &uh.uh_dport, 4689 NULL, /* XXX Inbound NAT? 
*/ 4690 &nk->addr[pd2.didx], 4691 nk->port[pd2.didx], &uh.uh_sum, 4692 pd2.ip_sum, icmpsum, 4693 pd->ip_sum, 1, pd2.af); 4694 4695 switch (pd2.af) { 4696#ifdef INET 4697 case AF_INET: 4698 m_copyback(m, off, ICMP_MINLEN, 4699 (caddr_t )pd->hdr.icmp); 4700 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4701 break; 4702#endif /* INET */ 4703#ifdef INET6 4704 case AF_INET6: 4705 m_copyback(m, off, 4706 sizeof(struct icmp6_hdr), 4707 (caddr_t )pd->hdr.icmp6); 4708 m_copyback(m, ipoff2, sizeof(h2_6), 4709 (caddr_t )&h2_6); 4710 break; 4711#endif /* INET6 */ 4712 } 4713 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); 4714 } 4715 return (PF_PASS); 4716 break; 4717 } 4718#ifdef INET 4719 case IPPROTO_ICMP: { 4720 struct icmp iih; 4721 4722 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 4723 NULL, reason, pd2.af)) { 4724 DPFPRINTF(PF_DEBUG_MISC, 4725 ("pf: ICMP error message too short " 4726 "(icmp)\n")); 4727 return (PF_DROP); 4728 } 4729 4730 key.af = pd2.af; 4731 key.proto = IPPROTO_ICMP; 4732 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4733 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4734 key.port[0] = key.port[1] = iih.icmp_id; 4735 4736 STATE_LOOKUP(kif, &key, direction, *state, pd); 4737 4738 /* translate source/destination address, if necessary */ 4739 if ((*state)->key[PF_SK_WIRE] != 4740 (*state)->key[PF_SK_STACK]) { 4741 struct pf_state_key *nk = 4742 (*state)->key[pd->didx]; 4743 4744 if (PF_ANEQ(pd2.src, 4745 &nk->addr[pd2.sidx], pd2.af) || 4746 nk->port[pd2.sidx] != iih.icmp_id) 4747 pf_change_icmp(pd2.src, &iih.icmp_id, 4748 daddr, &nk->addr[pd2.sidx], 4749 nk->port[pd2.sidx], NULL, 4750 pd2.ip_sum, icmpsum, 4751 pd->ip_sum, 0, AF_INET); 4752 4753 if (PF_ANEQ(pd2.dst, 4754 &nk->addr[pd2.didx], pd2.af) || 4755 nk->port[pd2.didx] != iih.icmp_id) 4756 pf_change_icmp(pd2.dst, &iih.icmp_id, 4757 NULL, /* XXX Inbound NAT? */ 4758 &nk->addr[pd2.didx], 4759 nk->port[pd2.didx], NULL, 4760 pd2.ip_sum, icmpsum, 4761 pd->ip_sum, 0, AF_INET); 4762 4763 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 4764 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4765 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); 4766 } 4767 return (PF_PASS); 4768 break; 4769 } 4770#endif /* INET */ 4771#ifdef INET6 4772 case IPPROTO_ICMPV6: { 4773 struct icmp6_hdr iih; 4774 4775 if (!pf_pull_hdr(m, off2, &iih, 4776 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { 4777 DPFPRINTF(PF_DEBUG_MISC, 4778 ("pf: ICMP error message too short " 4779 "(icmp6)\n")); 4780 return (PF_DROP); 4781 } 4782 4783 key.af = pd2.af; 4784 key.proto = IPPROTO_ICMPV6; 4785 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4786 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4787 key.port[0] = key.port[1] = iih.icmp6_id; 4788 4789 STATE_LOOKUP(kif, &key, direction, *state, pd); 4790 4791 /* translate source/destination address, if necessary */ 4792 if ((*state)->key[PF_SK_WIRE] != 4793 (*state)->key[PF_SK_STACK]) { 4794 struct pf_state_key *nk = 4795 (*state)->key[pd->didx]; 4796 4797 if (PF_ANEQ(pd2.src, 4798 &nk->addr[pd2.sidx], pd2.af) || 4799 nk->port[pd2.sidx] != iih.icmp6_id) 4800 pf_change_icmp(pd2.src, &iih.icmp6_id, 4801 daddr, &nk->addr[pd2.sidx], 4802 nk->port[pd2.sidx], NULL, 4803 pd2.ip_sum, icmpsum, 4804 pd->ip_sum, 0, AF_INET6); 4805 4806 if (PF_ANEQ(pd2.dst, 4807 &nk->addr[pd2.didx], pd2.af) || 4808 nk->port[pd2.didx] != iih.icmp6_id) 4809 pf_change_icmp(pd2.dst, &iih.icmp6_id, 4810 NULL, /* XXX Inbound NAT?
*/ 4811 &nk->addr[pd2.didx], 4812 nk->port[pd2.didx], NULL, 4813 pd2.ip_sum, icmpsum, 4814 pd->ip_sum, 0, AF_INET6); 4815 4816 m_copyback(m, off, sizeof(struct icmp6_hdr), 4817 (caddr_t)pd->hdr.icmp6); 4818 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 4819 m_copyback(m, off2, sizeof(struct icmp6_hdr), 4820 (caddr_t)&iih); 4821 } 4822 return (PF_PASS); 4823 break; 4824 } 4825#endif /* INET6 */ 4826 default: { 4827 key.af = pd2.af; 4828 key.proto = pd2.proto; 4829 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4830 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4831 key.port[0] = key.port[1] = 0; 4832 4833 STATE_LOOKUP(kif, &key, direction, *state, pd); 4834 4835 /* translate source/destination address, if necessary */ 4836 if ((*state)->key[PF_SK_WIRE] != 4837 (*state)->key[PF_SK_STACK]) { 4838 struct pf_state_key *nk = 4839 (*state)->key[pd->didx]; 4840 4841 if (PF_ANEQ(pd2.src, 4842 &nk->addr[pd2.sidx], pd2.af)) 4843 pf_change_icmp(pd2.src, NULL, daddr, 4844 &nk->addr[pd2.sidx], 0, NULL, 4845 pd2.ip_sum, icmpsum, 4846 pd->ip_sum, 0, pd2.af); 4847 4848 if (PF_ANEQ(pd2.dst, 4849 &nk->addr[pd2.didx], pd2.af)) 4850 pf_change_icmp(pd2.src, NULL, 4851 NULL, /* XXX Inbound NAT? */ 4852 &nk->addr[pd2.didx], 0, NULL, 4853 pd2.ip_sum, icmpsum, 4854 pd->ip_sum, 0, pd2.af); 4855 4856 switch (pd2.af) { 4857#ifdef INET 4858 case AF_INET: 4859 m_copyback(m, off, ICMP_MINLEN, 4860 (caddr_t)pd->hdr.icmp); 4861 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4862 break; 4863#endif /* INET */ 4864#ifdef INET6 4865 case AF_INET6: 4866 m_copyback(m, off, 4867 sizeof(struct icmp6_hdr), 4868 (caddr_t )pd->hdr.icmp6); 4869 m_copyback(m, ipoff2, sizeof(h2_6), 4870 (caddr_t )&h2_6); 4871 break; 4872#endif /* INET6 */ 4873 } 4874 } 4875 return (PF_PASS); 4876 break; 4877 } 4878 } 4879 } 4880} 4881 4882static int 4883pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 4884 struct mbuf *m, struct pf_pdesc *pd) 4885{ 4886 struct pf_state_peer *src, *dst; 4887 struct pf_state_key_cmp key; 4888 4889 bzero(&key, sizeof(key)); 4890 key.af = pd->af; 4891 key.proto = pd->proto; 4892 if (direction == PF_IN) { 4893 PF_ACPY(&key.addr[0], pd->src, key.af); 4894 PF_ACPY(&key.addr[1], pd->dst, key.af); 4895 key.port[0] = key.port[1] = 0; 4896 } else { 4897 PF_ACPY(&key.addr[1], pd->src, key.af); 4898 PF_ACPY(&key.addr[0], pd->dst, key.af); 4899 key.port[1] = key.port[0] = 0; 4900 } 4901 4902 STATE_LOOKUP(kif, &key, direction, *state, pd); 4903 4904 if (direction == (*state)->direction) { 4905 src = &(*state)->src; 4906 dst = &(*state)->dst; 4907 } else { 4908 src = &(*state)->dst; 4909 dst = &(*state)->src; 4910 } 4911 4912 /* update states */ 4913 if (src->state < PFOTHERS_SINGLE) 4914 src->state = PFOTHERS_SINGLE; 4915 if (dst->state == PFOTHERS_SINGLE) 4916 dst->state = PFOTHERS_MULTIPLE; 4917 4918 /* update expire time */ 4919 (*state)->expire = time_uptime; 4920 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) 4921 (*state)->timeout = PFTM_OTHER_MULTIPLE; 4922 else 4923 (*state)->timeout = PFTM_OTHER_SINGLE; 4924 4925 /* translate source/destination address, if necessary */ 4926 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4927 struct pf_state_key *nk = (*state)->key[pd->didx]; 4928 4929 KASSERT(nk, ("%s: nk is null", __func__)); 4930 KASSERT(pd, ("%s: pd is null", __func__)); 4931 KASSERT(pd->src, ("%s: pd->src is null", __func__)); 4932 KASSERT(pd->dst, ("%s: pd->dst is null", __func__)); 4933 switch (pd->af) { 4934#ifdef INET 4935 case AF_INET: 
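			/*
			 * Protocols without port numbers only get their
			 * addresses rewritten here; pf_change_a() patches
			 * the IPv4 header checksum incrementally instead
			 * of recomputing it.
			 */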
4936 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 4937 pf_change_a(&pd->src->v4.s_addr, 4938 pd->ip_sum, 4939 nk->addr[pd->sidx].v4.s_addr, 4940 0); 4941 4942 4943 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) 4944 pf_change_a(&pd->dst->v4.s_addr, 4945 pd->ip_sum, 4946 nk->addr[pd->didx].v4.s_addr, 4947 0); 4948 4949 break; 4950#endif /* INET */ 4951#ifdef INET6 4952 case AF_INET6: 4953 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6)) 4954 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); 4955 4956 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6)) 4957 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); 4958#endif /* INET6 */ 4959 } 4960 } 4961 return (PF_PASS); 4962} 4963 4964/* 4965 * ipoff and off are measured from the start of the mbuf chain. 4966 * h must be at "ipoff" on the mbuf chain. 4967 */ 4968void * 4969pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 4970 u_short *actionp, u_short *reasonp, sa_family_t af) 4971{ 4972 switch (af) { 4973#ifdef INET 4974 case AF_INET: { 4975 struct ip *h = mtod(m, struct ip *); 4976 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; 4977 4978 if (fragoff) { 4979 if (fragoff >= len) 4980 ACTION_SET(actionp, PF_PASS); 4981 else { 4982 ACTION_SET(actionp, PF_DROP); 4983 REASON_SET(reasonp, PFRES_FRAG); 4984 } 4985 return (NULL); 4986 } 4987 if (m->m_pkthdr.len < off + len || 4988 ntohs(h->ip_len) < off + len) { 4989 ACTION_SET(actionp, PF_DROP); 4990 REASON_SET(reasonp, PFRES_SHORT); 4991 return (NULL); 4992 } 4993 break; 4994 } 4995#endif /* INET */ 4996#ifdef INET6 4997 case AF_INET6: { 4998 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 4999 5000 if (m->m_pkthdr.len < off + len || 5001 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < 5002 (unsigned)(off + len)) { 5003 ACTION_SET(actionp, PF_DROP); 5004 REASON_SET(reasonp, PFRES_SHORT); 5005 return (NULL); 5006 } 5007 break; 5008 } 5009#endif /* INET6 */ 5010 } 5011 m_copydata(m, off, len, p); 5012 return (p); 5013} 5014 5015int 5016pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif, 5017 int rtableid) 5018{ 5019#ifdef RADIX_MPATH 5020 struct radix_node_head *rnh; 5021#endif 5022 struct sockaddr_in *dst; 5023 int ret = 1; 5024 int check_mpath; 5025#ifdef INET6 5026 struct sockaddr_in6 *dst6; 5027 struct route_in6 ro; 5028#else 5029 struct route ro; 5030#endif 5031 struct radix_node *rn; 5032 struct rtentry *rt; 5033 struct ifnet *ifp; 5034 5035 check_mpath = 0; 5036#ifdef RADIX_MPATH 5037 /* XXX: stick to table 0 for now */ 5038 rnh = rt_tables_get_rnh(0, af); 5039 if (rnh != NULL && rn_mpath_capable(rnh)) 5040 check_mpath = 1; 5041#endif 5042 bzero(&ro, sizeof(ro)); 5043 switch (af) { 5044 case AF_INET: 5045 dst = satosin(&ro.ro_dst); 5046 dst->sin_family = AF_INET; 5047 dst->sin_len = sizeof(*dst); 5048 dst->sin_addr = addr->v4; 5049 break; 5050#ifdef INET6 5051 case AF_INET6: 5052 /* 5053 * Skip check for addresses with embedded interface scope, 5054 * as they would always match anyway.
5055 */ 5056 if (IN6_IS_SCOPE_EMBED(&addr->v6)) 5057 goto out; 5058 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5059 dst6->sin6_family = AF_INET6; 5060 dst6->sin6_len = sizeof(*dst6); 5061 dst6->sin6_addr = addr->v6; 5062 break; 5063#endif /* INET6 */ 5064 default: 5065 return (0); 5066 } 5067 5068 /* Skip checks for ipsec interfaces */ 5069 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) 5070 goto out; 5071 5072 switch (af) { 5073#ifdef INET6 5074 case AF_INET6: 5075 in6_rtalloc_ign(&ro, 0, rtableid); 5076 break; 5077#endif 5078#ifdef INET 5079 case AF_INET: 5080 in_rtalloc_ign((struct route *)&ro, 0, rtableid); 5081 break; 5082#endif 5083 default: 5084 rtalloc_ign((struct route *)&ro, 0); /* No/default FIB. */ 5085 break; 5086 } 5087 5088 if (ro.ro_rt != NULL) { 5089 /* No interface given, this is a no-route check */ 5090 if (kif == NULL) 5091 goto out; 5092 5093 if (kif->pfik_ifp == NULL) { 5094 ret = 0; 5095 goto out; 5096 } 5097 5098 /* Perform uRPF check if passed input interface */ 5099 ret = 0; 5100 rn = (struct radix_node *)ro.ro_rt; 5101 do { 5102 rt = (struct rtentry *)rn; 5103 ifp = rt->rt_ifp; 5104 5105 if (kif->pfik_ifp == ifp) 5106 ret = 1; 5107#ifdef RADIX_MPATH 5108 rn = rn_mpath_next(rn); 5109#endif 5110 } while (check_mpath == 1 && rn != NULL && ret == 0); 5111 } else 5112 ret = 0; 5113out: 5114 if (ro.ro_rt != NULL) 5115 RTFREE(ro.ro_rt); 5116 return (ret); 5117} 5118 5119#ifdef INET 5120static void 5121pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5122 struct pf_state *s, struct pf_pdesc *pd) 5123{ 5124 struct mbuf *m0, *m1; 5125 struct sockaddr_in dst; 5126 struct ip *ip; 5127 struct ifnet *ifp = NULL; 5128 struct pf_addr naddr; 5129 struct pf_src_node *sn = NULL; 5130 int error = 0; 5131 int sw_csum; 5132 5133 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); 5134 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction", 5135 __func__)); 5136 5137 if ((pd->pf_mtag == NULL && 5138 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || 5139 pd->pf_mtag->routed++ > 3) { 5140 m0 = *m; 5141 *m = NULL; 5142 goto bad_locked; 5143 } 5144 5145 if (r->rt == PF_DUPTO) { 5146 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) { 5147 if (s) 5148 PF_STATE_UNLOCK(s); 5149 return; 5150 } 5151 } else { 5152 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5153 if (s) 5154 PF_STATE_UNLOCK(s); 5155 return; 5156 } 5157 m0 = *m; 5158 } 5159 5160 ip = mtod(m0, struct ip *); 5161 5162 bzero(&dst, sizeof(dst)); 5163 dst.sin_family = AF_INET; 5164 dst.sin_len = sizeof(dst); 5165 dst.sin_addr = ip->ip_dst; 5166 5167 if (r->rt == PF_FASTROUTE) { 5168 struct rtentry *rt; 5169 5170 if (s) 5171 PF_STATE_UNLOCK(s); 5172 rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0)); 5173 if (rt == NULL) { 5174 RTFREE_LOCKED(rt); 5175 KMOD_IPSTAT_INC(ips_noroute); 5176 error = EHOSTUNREACH; 5177 goto bad; 5178 } 5179 5180 ifp = rt->rt_ifp; 5181 rt->rt_rmx.rmx_pksent++; 5182 5183 if (rt->rt_flags & RTF_GATEWAY) 5184 bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst)); 5185 RTFREE_LOCKED(rt); 5186 } else { 5187 if (TAILQ_EMPTY(&r->rpool.list)) { 5188 DPFPRINTF(PF_DEBUG_URGENT, 5189 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__)); 5190 goto bad_locked; 5191 } 5192 if (s == NULL) { 5193 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 5194 &naddr, NULL, &sn); 5195 if (!PF_AZERO(&naddr, AF_INET)) 5196 dst.sin_addr.s_addr = naddr.v4.s_addr; 5197 ifp = r->rpool.cur->kif ? 
5198 r->rpool.cur->kif->pfik_ifp : NULL; 5199 } else { 5200 if (!PF_AZERO(&s->rt_addr, AF_INET)) 5201 dst.sin_addr.s_addr = 5202 s->rt_addr.v4.s_addr; 5203 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5204 PF_STATE_UNLOCK(s); 5205 } 5206 } 5207 if (ifp == NULL) 5208 goto bad; 5209 5210 if (oifp != ifp) { 5211 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS) 5212 goto bad; 5213 else if (m0 == NULL) 5214 goto done; 5215 if (m0->m_len < sizeof(struct ip)) { 5216 DPFPRINTF(PF_DEBUG_URGENT, 5217 ("%s: m0->m_len < sizeof(struct ip)\n", __func__)); 5218 goto bad; 5219 } 5220 ip = mtod(m0, struct ip *); 5221 } 5222 5223 if (ifp->if_flags & IFF_LOOPBACK) 5224 m0->m_flags |= M_SKIP_FIREWALL; 5225 5226 /* Back to host byte order. */ 5227 ip->ip_len = ntohs(ip->ip_len); 5228 ip->ip_off = ntohs(ip->ip_off); 5229 5230 /* Copied from FreeBSD 10.0-CURRENT ip_output. */ 5231 m0->m_pkthdr.csum_flags |= CSUM_IP; 5232 sw_csum = m0->m_pkthdr.csum_flags & ~ifp->if_hwassist; 5233 if (sw_csum & CSUM_DELAY_DATA) { 5234 in_delayed_cksum(m0); 5235 sw_csum &= ~CSUM_DELAY_DATA; 5236 } 5237#ifdef SCTP 5238 if (sw_csum & CSUM_SCTP) { 5239 sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2)); 5240 sw_csum &= ~CSUM_SCTP; 5241 } 5242#endif 5243 m0->m_pkthdr.csum_flags &= ifp->if_hwassist; 5244 5245 /* 5246 * If small enough for interface, or the interface will take 5247 * care of the fragmentation for us, we can just send directly. 5248 */ 5249 if (ip->ip_len <= ifp->if_mtu || 5250 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 || 5251 ((ip->ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) { 5252 ip->ip_len = htons(ip->ip_len); 5253 ip->ip_off = htons(ip->ip_off); 5254 ip->ip_sum = 0; 5255 if (sw_csum & CSUM_DELAY_IP) 5256 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); 5257 m0->m_flags &= ~(M_PROTOFLAGS); 5258 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL); 5259 goto done; 5260 } 5261 5262 /* Balk when DF bit is set or the interface didn't support TSO. 
*/ 5263 if ((ip->ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5264 error = EMSGSIZE; 5265 KMOD_IPSTAT_INC(ips_cantfrag); 5266 if (r->rt != PF_DUPTO) { 5267 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, 5268 ifp->if_mtu); 5269 goto done; 5270 } else 5271 goto bad; 5272 } 5273 5274 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist, sw_csum); 5275 if (error) 5276 goto bad; 5277 5278 for (; m0; m0 = m1) { 5279 m1 = m0->m_nextpkt; 5280 m0->m_nextpkt = NULL; 5281 if (error == 0) { 5282 m0->m_flags &= ~(M_PROTOFLAGS); 5283 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL); 5284 } else 5285 m_freem(m0); 5286 } 5287 5288 if (error == 0) 5289 KMOD_IPSTAT_INC(ips_fragmented); 5290 5291done: 5292 if (r->rt != PF_DUPTO) 5293 *m = NULL; 5294 return; 5295 5296bad_locked: 5297 if (s) 5298 PF_STATE_UNLOCK(s); 5299bad: 5300 m_freem(m0); 5301 goto done; 5302} 5303#endif /* INET */ 5304 5305#ifdef INET6 5306static void 5307pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5308 struct pf_state *s, struct pf_pdesc *pd) 5309{ 5310 struct mbuf *m0; 5311 struct sockaddr_in6 dst; 5312 struct ip6_hdr *ip6; 5313 struct ifnet *ifp = NULL; 5314 struct pf_addr naddr; 5315 struct pf_src_node *sn = NULL; 5316 5317 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); 5318 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction", 5319 __func__)); 5320 5321 if ((pd->pf_mtag == NULL && 5322 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || 5323 pd->pf_mtag->routed++ > 3) { 5324 m0 = *m; 5325 *m = NULL; 5326 goto bad_locked; 5327 } 5328 5329 if (r->rt == PF_DUPTO) { 5330 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) { 5331 if (s) 5332 PF_STATE_UNLOCK(s); 5333 return; 5334 } 5335 } else { 5336 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5337 if (s) 5338 PF_STATE_UNLOCK(s); 5339 return; 5340 } 5341 m0 = *m; 5342 } 5343 5344 ip6 = mtod(m0, struct ip6_hdr *); 5345 5346 bzero(&dst, sizeof(dst)); 5347 dst.sin6_family = AF_INET6; 5348 dst.sin6_len = sizeof(dst); 5349 dst.sin6_addr = ip6->ip6_dst; 5350 5351 /* Cheat. XXX why only in the v6 case??? */ 5352 if (r->rt == PF_FASTROUTE) { 5353 if (s) 5354 PF_STATE_UNLOCK(s); 5355 m0->m_flags |= M_SKIP_FIREWALL; 5356 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 5357 return; 5358 } 5359 5360 if (TAILQ_EMPTY(&r->rpool.list)) { 5361 DPFPRINTF(PF_DEBUG_URGENT, 5362 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__)); 5363 goto bad_locked; 5364 } 5365 if (s == NULL) { 5366 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, 5367 &naddr, NULL, &sn); 5368 if (!PF_AZERO(&naddr, AF_INET6)) 5369 PF_ACPY((struct pf_addr *)&dst.sin6_addr, 5370 &naddr, AF_INET6); 5371 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; 5372 } else { 5373 if (!PF_AZERO(&s->rt_addr, AF_INET6)) 5374 PF_ACPY((struct pf_addr *)&dst.sin6_addr, 5375 &s->rt_addr, AF_INET6); 5376 ifp = s->rt_kif ? 
s->rt_kif->pfik_ifp : NULL; 5377 } 5378 5379 if (s) 5380 PF_STATE_UNLOCK(s); 5381 5382 if (ifp == NULL) 5383 goto bad; 5384 5385 if (oifp != ifp) { 5386 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS) 5387 goto bad; 5388 else if (m0 == NULL) 5389 goto done; 5390 if (m0->m_len < sizeof(struct ip6_hdr)) { 5391 DPFPRINTF(PF_DEBUG_URGENT, 5392 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n", 5393 __func__)); 5394 goto bad; 5395 } 5396 ip6 = mtod(m0, struct ip6_hdr *); 5397 } 5398 5399 if (ifp->if_flags & IFF_LOOPBACK) 5400 m0->m_flags |= M_SKIP_FIREWALL; 5401 5402 /* 5403 * If the packet is too large for the outgoing interface, 5404 * send back an icmp6 error. 5405 */ 5406 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr)) 5407 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index); 5408 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) 5409 nd6_output(ifp, ifp, m0, &dst, NULL); 5410 else { 5411 in6_ifstat_inc(ifp, ifs6_in_toobig); 5412 if (r->rt != PF_DUPTO) 5413 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); 5414 else 5415 goto bad; 5416 } 5417 5418done: 5419 if (r->rt != PF_DUPTO) 5420 *m = NULL; 5421 return; 5422 5423bad_locked: 5424 if (s) 5425 PF_STATE_UNLOCK(s); 5426bad: 5427 m_freem(m0); 5428 goto done; 5429} 5430#endif /* INET6 */ 5431 5432/* 5433 * FreeBSD supports cksum offloads for the following drivers. 5434 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4), 5435 * ti(4), txp(4), xl(4) 5436 * 5437 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR : 5438 * network driver performed cksum including pseudo header, need to verify 5439 * csum_data 5440 * CSUM_DATA_VALID : 5441 * network driver performed cksum, needs additional pseudo header 5442 * cksum computation with partial csum_data (i.e. lack of H/W support for 5443 * pseudo header, for instance hme(4), sk(4) and possibly gem(4)) 5444 * 5445 * After validating the cksum of the packet, set both flags CSUM_DATA_VALID and 5446 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper 5447 * TCP/UDP layer. 5448 * Also, set csum_data to 0xffff to force cksum validation.
5449 */ 5450static int 5451pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af) 5452{ 5453 u_int16_t sum = 0; 5454 int hw_assist = 0; 5455 struct ip *ip; 5456 5457 if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) 5458 return (1); 5459 if (m->m_pkthdr.len < off + len) 5460 return (1); 5461 5462 switch (p) { 5463 case IPPROTO_TCP: 5464 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5465 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5466 sum = m->m_pkthdr.csum_data; 5467 } else { 5468 ip = mtod(m, struct ip *); 5469 sum = in_pseudo(ip->ip_src.s_addr, 5470 ip->ip_dst.s_addr, htonl((u_short)len + 5471 m->m_pkthdr.csum_data + IPPROTO_TCP)); 5472 } 5473 sum ^= 0xffff; 5474 ++hw_assist; 5475 } 5476 break; 5477 case IPPROTO_UDP: 5478 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5479 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5480 sum = m->m_pkthdr.csum_data; 5481 } else { 5482 ip = mtod(m, struct ip *); 5483 sum = in_pseudo(ip->ip_src.s_addr, 5484 ip->ip_dst.s_addr, htonl((u_short)len + 5485 m->m_pkthdr.csum_data + IPPROTO_UDP)); 5486 } 5487 sum ^= 0xffff; 5488 ++hw_assist; 5489 } 5490 break; 5491 case IPPROTO_ICMP: 5492#ifdef INET6 5493 case IPPROTO_ICMPV6: 5494#endif /* INET6 */ 5495 break; 5496 default: 5497 return (1); 5498 } 5499 5500 if (!hw_assist) { 5501 switch (af) { 5502 case AF_INET: 5503 if (p == IPPROTO_ICMP) { 5504 if (m->m_len < off) 5505 return (1); 5506 m->m_data += off; 5507 m->m_len -= off; 5508 sum = in_cksum(m, len); 5509 m->m_data -= off; 5510 m->m_len += off; 5511 } else { 5512 if (m->m_len < sizeof(struct ip)) 5513 return (1); 5514 sum = in4_cksum(m, p, off, len); 5515 } 5516 break; 5517#ifdef INET6 5518 case AF_INET6: 5519 if (m->m_len < sizeof(struct ip6_hdr)) 5520 return (1); 5521 sum = in6_cksum(m, p, off, len); 5522 break; 5523#endif /* INET6 */ 5524 default: 5525 return (1); 5526 } 5527 } 5528 if (sum) { 5529 switch (p) { 5530 case IPPROTO_TCP: 5531 { 5532 KMOD_TCPSTAT_INC(tcps_rcvbadsum); 5533 break; 5534 } 5535 case IPPROTO_UDP: 5536 { 5537 KMOD_UDPSTAT_INC(udps_badsum); 5538 break; 5539 } 5540#ifdef INET 5541 case IPPROTO_ICMP: 5542 { 5543 KMOD_ICMPSTAT_INC(icps_checksum); 5544 break; 5545 } 5546#endif 5547#ifdef INET6 5548 case IPPROTO_ICMPV6: 5549 { 5550 KMOD_ICMP6STAT_INC(icp6s_checksum); 5551 break; 5552 } 5553#endif /* INET6 */ 5554 } 5555 return (1); 5556 } else { 5557 if (p == IPPROTO_TCP || p == IPPROTO_UDP) { 5558 m->m_pkthdr.csum_flags |= 5559 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 5560 m->m_pkthdr.csum_data = 0xffff; 5561 } 5562 } 5563 return (0); 5564} 5565 5566 5567#ifdef INET 5568int 5569pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) 5570{ 5571 struct pfi_kif *kif; 5572 u_short action, reason = 0, log = 0; 5573 struct mbuf *m = *m0; 5574 struct ip *h = NULL; 5575 struct m_tag *ipfwtag; 5576 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr; 5577 struct pf_state *s = NULL; 5578 struct pf_ruleset *ruleset = NULL; 5579 struct pf_pdesc pd; 5580 int off, dirndx, pqid = 0; 5581 5582 M_ASSERTPKTHDR(m); 5583 5584 if (!V_pf_status.running) 5585 return (PF_PASS); 5586 5587 memset(&pd, 0, sizeof(pd)); 5588 5589 kif = (struct pfi_kif *)ifp->if_pf_kif; 5590 5591 if (kif == NULL) { 5592 DPFPRINTF(PF_DEBUG_URGENT, 5593 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); 5594 return (PF_DROP); 5595 } 5596 if (kif->pfik_flags & PFI_IFLAG_SKIP) 5597 return (PF_PASS); 5598 5599 if (m->m_flags & M_SKIP_FIREWALL) 5600 return (PF_PASS); 5601 5602 if (m->m_pkthdr.len < 
(int)sizeof(struct ip)) { 5603 action = PF_DROP; 5604 REASON_SET(&reason, PFRES_SHORT); 5605 log = 1; 5606 goto done; 5607 } 5608 5609 pd.pf_mtag = pf_find_mtag(m); 5610 5611 PF_RULES_RLOCK(); 5612 5613 if (ip_divert_ptr != NULL && 5614 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) { 5615 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1); 5616 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) { 5617 if (pd.pf_mtag == NULL && 5618 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { 5619 action = PF_DROP; 5620 goto done; 5621 } 5622 pd.pf_mtag->flags |= PF_PACKET_LOOPED; 5623 m_tag_delete(m, ipfwtag); 5624 } 5625 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) { 5626 m->m_flags |= M_FASTFWD_OURS; 5627 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT; 5628 } 5629 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) { 5630 /* We do IP header normalization and packet reassembly here */ 5631 action = PF_DROP; 5632 goto done; 5633 } 5634 m = *m0; /* pf_normalize messes with m0 */ 5635 h = mtod(m, struct ip *); 5636 5637 off = h->ip_hl << 2; 5638 if (off < (int)sizeof(struct ip)) { 5639 action = PF_DROP; 5640 REASON_SET(&reason, PFRES_SHORT); 5641 log = 1; 5642 goto done; 5643 } 5644 5645 pd.src = (struct pf_addr *)&h->ip_src; 5646 pd.dst = (struct pf_addr *)&h->ip_dst; 5647 pd.sport = pd.dport = NULL; 5648 pd.ip_sum = &h->ip_sum; 5649 pd.proto_sum = NULL; 5650 pd.proto = h->ip_p; 5651 pd.dir = dir; 5652 pd.sidx = (dir == PF_IN) ? 0 : 1; 5653 pd.didx = (dir == PF_IN) ? 1 : 0; 5654 pd.af = AF_INET; 5655 pd.tos = h->ip_tos; 5656 pd.tot_len = ntohs(h->ip_len); 5657 5658 /* handle fragments that didn't get reassembled by normalization */ 5659 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) { 5660 action = pf_test_fragment(&r, dir, kif, m, h, 5661 &pd, &a, &ruleset); 5662 goto done; 5663 } 5664 5665 switch (h->ip_p) { 5666 5667 case IPPROTO_TCP: { 5668 struct tcphdr th; 5669 5670 pd.hdr.tcp = &th; 5671 if (!pf_pull_hdr(m, off, &th, sizeof(th), 5672 &action, &reason, AF_INET)) { 5673 log = action != PF_PASS; 5674 goto done; 5675 } 5676 pd.p_len = pd.tot_len - off - (th.th_off << 2); 5677 if ((th.th_flags & TH_ACK) && pd.p_len == 0) 5678 pqid = 1; 5679 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 5680 if (action == PF_DROP) 5681 goto done; 5682 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 5683 &reason); 5684 if (action == PF_PASS) { 5685 if (pfsync_update_state_ptr != NULL) 5686 pfsync_update_state_ptr(s); 5687 r = s->rule.ptr; 5688 a = s->anchor.ptr; 5689 log = s->log; 5690 } else if (s == NULL) 5691 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5692 &a, &ruleset, inp); 5693 break; 5694 } 5695 5696 case IPPROTO_UDP: { 5697 struct udphdr uh; 5698 5699 pd.hdr.udp = &uh; 5700 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 5701 &action, &reason, AF_INET)) { 5702 log = action != PF_PASS; 5703 goto done; 5704 } 5705 if (uh.uh_dport == 0 || 5706 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 5707 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 5708 action = PF_DROP; 5709 REASON_SET(&reason, PFRES_SHORT); 5710 goto done; 5711 } 5712 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 5713 if (action == PF_PASS) { 5714 if (pfsync_update_state_ptr != NULL) 5715 pfsync_update_state_ptr(s); 5716 r = s->rule.ptr; 5717 a = s->anchor.ptr; 5718 log = s->log; 5719 } else if (s == NULL) 5720 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5721 &a, &ruleset, inp); 5722 break; 5723 } 5724 5725 case IPPROTO_ICMP: { 5726 struct icmp ih; 5727 5728 
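		/*
		 * Only the fixed 8-byte ICMP header (ICMP_MINLEN) is
		 * pulled here; the type, id and checksum fields that
		 * pf_test_state_icmp() uses all live in those first
		 * 8 bytes.
		 */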
pd.hdr.icmp = &ih; 5729 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 5730 &action, &reason, AF_INET)) { 5731 log = action != PF_PASS; 5732 goto done; 5733 } 5734 action = pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, 5735 &reason); 5736 if (action == PF_PASS) { 5737 if (pfsync_update_state_ptr != NULL) 5738 pfsync_update_state_ptr(s); 5739 r = s->rule.ptr; 5740 a = s->anchor.ptr; 5741 log = s->log; 5742 } else if (s == NULL) 5743 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5744 &a, &ruleset, inp); 5745 break; 5746 } 5747 5748#ifdef INET6 5749 case IPPROTO_ICMPV6: { 5750 action = PF_DROP; 5751 DPFPRINTF(PF_DEBUG_MISC, 5752 ("pf: dropping IPv4 packet with ICMPv6 payload\n")); 5753 goto done; 5754 } 5755#endif 5756 5757 default: 5758 action = pf_test_state_other(&s, dir, kif, m, &pd); 5759 if (action == PF_PASS) { 5760 if (pfsync_update_state_ptr != NULL) 5761 pfsync_update_state_ptr(s); 5762 r = s->rule.ptr; 5763 a = s->anchor.ptr; 5764 log = s->log; 5765 } else if (s == NULL) 5766 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5767 &a, &ruleset, inp); 5768 break; 5769 } 5770 5771done: 5772 PF_RULES_RUNLOCK(); 5773 if (action == PF_PASS && h->ip_hl > 5 && 5774 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 5775 action = PF_DROP; 5776 REASON_SET(&reason, PFRES_IPOPTIONS); 5777 log = 1; 5778 DPFPRINTF(PF_DEBUG_MISC, 5779 ("pf: dropping packet with ip options\n")); 5780 } 5781 5782 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) { 5783 action = PF_DROP; 5784 REASON_SET(&reason, PFRES_MEMORY); 5785 } 5786 if (r->rtableid >= 0) 5787 M_SETFIB(m, r->rtableid); 5788 5789#ifdef ALTQ 5790 if (action == PF_PASS && r->qid) { 5791 if (pd.pf_mtag == NULL && 5792 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { 5793 action = PF_DROP; 5794 REASON_SET(&reason, PFRES_MEMORY); 5795 } 5796 if (pqid || (pd.tos & IPTOS_LOWDELAY)) 5797 pd.pf_mtag->qid = r->pqid; 5798 else 5799 pd.pf_mtag->qid = r->qid; 5800 /* add hints for ecn */ 5801 pd.pf_mtag->hdr = h; 5802 5803 } 5804#endif /* ALTQ */ 5805 5806 /* 5807 * connections redirected to loopback should not match sockets 5808 * bound specifically to loopback due to security implications, 5809 * see tcp_input() and in_pcblookup_listen(). 5810 */ 5811 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 5812 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 5813 (s->nat_rule.ptr->action == PF_RDR || 5814 s->nat_rule.ptr->action == PF_BINAT) && 5815 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) 5816 m->m_flags |= M_SKIP_FIREWALL; 5817 5818 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL && 5819 !PACKET_LOOPED(&pd)) { 5820 5821 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0, 5822 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO); 5823 if (ipfwtag != NULL) { 5824 ((struct ipfw_rule_ref *)(ipfwtag+1))->info = 5825 ntohs(r->divert.port); 5826 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir; 5827 5828 if (s) 5829 PF_STATE_UNLOCK(s); 5830 5831 m_tag_prepend(m, ipfwtag); 5832 if (m->m_flags & M_FASTFWD_OURS) { 5833 if (pd.pf_mtag == NULL && 5834 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { 5835 action = PF_DROP; 5836 REASON_SET(&reason, PFRES_MEMORY); 5837 log = 1; 5838 DPFPRINTF(PF_DEBUG_MISC, 5839 ("pf: failed to allocate tag\n")); 5840 } 5841 pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT; 5842 m->m_flags &= ~M_FASTFWD_OURS; 5843 } 5844 ip_divert_ptr(*m0, dir == PF_IN ? 
DIR_IN : DIR_OUT); 5845 *m0 = NULL; 5846 5847 return (action); 5848 } else { 5849 /* XXX: ipfw has the same behaviour! */ 5850 action = PF_DROP; 5851 REASON_SET(&reason, PFRES_MEMORY); 5852 log = 1; 5853 DPFPRINTF(PF_DEBUG_MISC, 5854 ("pf: failed to allocate divert tag\n")); 5855 } 5856 } 5857 5858 if (log) { 5859 struct pf_rule *lr; 5860 5861 if (s != NULL && s->nat_rule.ptr != NULL && 5862 s->nat_rule.ptr->log & PF_LOG_ALL) 5863 lr = s->nat_rule.ptr; 5864 else 5865 lr = r; 5866 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd, 5867 (s == NULL)); 5868 } 5869 5870 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 5871 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++; 5872 5873 if (action == PF_PASS || r->action == PF_DROP) { 5874 dirndx = (dir == PF_OUT); 5875 r->packets[dirndx]++; 5876 r->bytes[dirndx] += pd.tot_len; 5877 if (a != NULL) { 5878 a->packets[dirndx]++; 5879 a->bytes[dirndx] += pd.tot_len; 5880 } 5881 if (s != NULL) { 5882 if (s->nat_rule.ptr != NULL) { 5883 s->nat_rule.ptr->packets[dirndx]++; 5884 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 5885 } 5886 if (s->src_node != NULL) { 5887 s->src_node->packets[dirndx]++; 5888 s->src_node->bytes[dirndx] += pd.tot_len; 5889 } 5890 if (s->nat_src_node != NULL) { 5891 s->nat_src_node->packets[dirndx]++; 5892 s->nat_src_node->bytes[dirndx] += pd.tot_len; 5893 } 5894 dirndx = (dir == s->direction) ? 0 : 1; 5895 s->packets[dirndx]++; 5896 s->bytes[dirndx] += pd.tot_len; 5897 } 5898 tr = r; 5899 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 5900 if (nr != NULL && r == &V_pf_default_rule) 5901 tr = nr; 5902 if (tr->src.addr.type == PF_ADDR_TABLE) 5903 pfr_update_stats(tr->src.addr.p.tbl, 5904 (s == NULL) ? pd.src : 5905 &s->key[(s->direction == PF_IN)]-> 5906 addr[(s->direction == PF_OUT)], 5907 pd.af, pd.tot_len, dir == PF_OUT, 5908 r->action == PF_PASS, tr->src.neg); 5909 if (tr->dst.addr.type == PF_ADDR_TABLE) 5910 pfr_update_stats(tr->dst.addr.p.tbl, 5911 (s == NULL) ? pd.dst : 5912 &s->key[(s->direction == PF_IN)]-> 5913 addr[(s->direction == PF_IN)], 5914 pd.af, pd.tot_len, dir == PF_OUT, 5915 r->action == PF_PASS, tr->dst.neg); 5916 } 5917 5918 switch (action) { 5919 case PF_SYNPROXY_DROP: 5920 m_freem(*m0); 5921 case PF_DEFER: 5922 *m0 = NULL; 5923 action = PF_PASS; 5924 break; 5925 default: 5926 /* pf_route() returns unlocked. 
*/ 5927 if (r->rt) { 5928 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd); 5929 return (action); 5930 } 5931 break; 5932 } 5933 if (s) 5934 PF_STATE_UNLOCK(s); 5935 5936 return (action); 5937} 5938#endif /* INET */ 5939 5940#ifdef INET6 5941int 5942pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) 5943{ 5944 struct pfi_kif *kif; 5945 u_short action, reason = 0, log = 0; 5946 struct mbuf *m = *m0, *n = NULL; 5947 struct ip6_hdr *h = NULL; 5948 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr; 5949 struct pf_state *s = NULL; 5950 struct pf_ruleset *ruleset = NULL; 5951 struct pf_pdesc pd; 5952 int off, terminal = 0, dirndx, rh_cnt = 0; 5953 5954 M_ASSERTPKTHDR(m); 5955 5956 if (!V_pf_status.running) 5957 return (PF_PASS); 5958 5959 memset(&pd, 0, sizeof(pd)); 5960 pd.pf_mtag = pf_find_mtag(m); 5961 5962 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED) 5963 return (PF_PASS); 5964 5965 kif = (struct pfi_kif *)ifp->if_pf_kif; 5966 if (kif == NULL) { 5967 DPFPRINTF(PF_DEBUG_URGENT, 5968 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname)); 5969 return (PF_DROP); 5970 } 5971 if (kif->pfik_flags & PFI_IFLAG_SKIP) 5972 return (PF_PASS); 5973 5974 if (m->m_pkthdr.len < (int)sizeof(*h)) { 5975 action = PF_DROP; 5976 REASON_SET(&reason, PFRES_SHORT); 5977 log = 1; 5978 goto done; 5979 } 5980 5981 PF_RULES_RLOCK(); 5982 5983 /* We do IP header normalization and packet reassembly here */ 5984 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) { 5985 action = PF_DROP; 5986 goto done; 5987 } 5988 m = *m0; /* pf_normalize messes with m0 */ 5989 h = mtod(m, struct ip6_hdr *); 5990 5991#if 1 5992 /* 5993 * we do not support jumbogram yet. if we keep going, zero ip6_plen 5994 * will do something bad, so drop the packet for now. 5995 */ 5996 if (htons(h->ip6_plen) == 0) { 5997 action = PF_DROP; 5998 REASON_SET(&reason, PFRES_NORM); /*XXX*/ 5999 goto done; 6000 } 6001#endif 6002 6003 pd.src = (struct pf_addr *)&h->ip6_src; 6004 pd.dst = (struct pf_addr *)&h->ip6_dst; 6005 pd.sport = pd.dport = NULL; 6006 pd.ip_sum = NULL; 6007 pd.proto_sum = NULL; 6008 pd.dir = dir; 6009 pd.sidx = (dir == PF_IN) ? 0 : 1; 6010 pd.didx = (dir == PF_IN) ? 
#ifdef INET6
int
pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
{
    struct pfi_kif *kif;
    u_short action, reason = 0, log = 0;
    struct mbuf *m = *m0, *n = NULL;
    struct ip6_hdr *h = NULL;
    struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
    struct pf_state *s = NULL;
    struct pf_ruleset *ruleset = NULL;
    struct pf_pdesc pd;
    int off, terminal = 0, dirndx, rh_cnt = 0;

    M_ASSERTPKTHDR(m);

    if (!V_pf_status.running)
        return (PF_PASS);

    memset(&pd, 0, sizeof(pd));
    pd.pf_mtag = pf_find_mtag(m);

    if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED)
        return (PF_PASS);

    kif = (struct pfi_kif *)ifp->if_pf_kif;
    if (kif == NULL) {
        DPFPRINTF(PF_DEBUG_URGENT,
            ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname));
        return (PF_DROP);
    }
    if (kif->pfik_flags & PFI_IFLAG_SKIP)
        return (PF_PASS);

    /*
     * Take the rules lock before the first "goto done"; the done:
     * label below unconditionally drops it.
     */
    PF_RULES_RLOCK();

    if (m->m_pkthdr.len < (int)sizeof(*h)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_SHORT);
        log = 1;
        goto done;
    }

    /* We do IP header normalization and packet reassembly here. */
    if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) {
        action = PF_DROP;
        goto done;
    }
    m = *m0;    /* pf_normalize messes with m0 */
    h = mtod(m, struct ip6_hdr *);

#if 1
    /*
     * We do not support jumbograms yet.  If we keep going, a zero
     * ip6_plen will do something bad, so drop the packet for now.
     */
    if (htons(h->ip6_plen) == 0) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_NORM);    /* XXX */
        goto done;
    }
#endif

    pd.src = (struct pf_addr *)&h->ip6_src;
    pd.dst = (struct pf_addr *)&h->ip6_dst;
    pd.sport = pd.dport = NULL;
    pd.ip_sum = NULL;
    pd.proto_sum = NULL;
    pd.dir = dir;
    pd.sidx = (dir == PF_IN) ? 0 : 1;
    pd.didx = (dir == PF_IN) ? 1 : 0;
    pd.af = AF_INET6;
    pd.tos = 0;
    pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);

    off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr);
    pd.proto = h->ip6_nxt;
    do {
        switch (pd.proto) {
        case IPPROTO_FRAGMENT:
            action = pf_test_fragment(&r, dir, kif, m, h,
                &pd, &a, &ruleset);
            if (action == PF_DROP)
                REASON_SET(&reason, PFRES_FRAG);
            goto done;
        case IPPROTO_ROUTING: {
            struct ip6_rthdr rthdr;

            if (rh_cnt++) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 more than one rthdr\n"));
                action = PF_DROP;
                REASON_SET(&reason, PFRES_IPOPTIONS);
                log = 1;
                goto done;
            }
            if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL,
                &reason, pd.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 short rthdr\n"));
                action = PF_DROP;
                REASON_SET(&reason, PFRES_SHORT);
                log = 1;
                goto done;
            }
            if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 rthdr0\n"));
                action = PF_DROP;
                REASON_SET(&reason, PFRES_IPOPTIONS);
                log = 1;
                goto done;
            }
            /* FALLTHROUGH */
        }
        case IPPROTO_AH:
        case IPPROTO_HOPOPTS:
        case IPPROTO_DSTOPTS: {
            /* Get next header and header length. */
            struct ip6_ext opt6;

            if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6),
                NULL, &reason, pd.af)) {
                DPFPRINTF(PF_DEBUG_MISC,
                    ("pf: IPv6 short opt\n"));
                action = PF_DROP;
                log = 1;
                goto done;
            }
            if (pd.proto == IPPROTO_AH)
                off += (opt6.ip6e_len + 2) * 4;
            else
                off += (opt6.ip6e_len + 1) * 8;
            pd.proto = opt6.ip6e_nxt;
            /* Go to the next header. */
            break;
        }
        default:
            terminal++;
            break;
        }
    } while (!terminal);

    /* If there's no routing header, use unmodified mbuf for checksumming. */
    if (!n)
        n = m;
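    /*
     * Illustrative note (added commentary, not in the original source):
     * in the loop above, ip6e_len counts 8-octet units excluding the
     * first 8 octets for hop-by-hop, destination-options and routing
     * headers, but 32-bit words excluding the first two for AH
     * (RFC 2460, RFC 4302).  For example, ip6e_len == 2 means a 24-byte
     * routing header ((2 + 1) * 8), while a 24-byte AH carries
     * ip6e_len == 4 ((4 + 2) * 4); hence the two formulas used to
     * advance 'off'.
     */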
    switch (pd.proto) {

    case IPPROTO_TCP: {
        struct tcphdr th;

        pd.hdr.tcp = &th;
        if (!pf_pull_hdr(m, off, &th, sizeof(th),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;
            goto done;
        }
        pd.p_len = pd.tot_len - off - (th.th_off << 2);
        action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
        if (action == PF_DROP)
            goto done;
        action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
            &reason);
        if (action == PF_PASS) {
            if (pfsync_update_state_ptr != NULL)
                pfsync_update_state_ptr(s);
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
                &a, &ruleset, inp);
        break;
    }

    case IPPROTO_UDP: {
        struct udphdr uh;

        pd.hdr.udp = &uh;
        if (!pf_pull_hdr(m, off, &uh, sizeof(uh),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;
            goto done;
        }
        if (uh.uh_dport == 0 ||
            ntohs(uh.uh_ulen) > m->m_pkthdr.len - off ||
            ntohs(uh.uh_ulen) < sizeof(struct udphdr)) {
            action = PF_DROP;
            REASON_SET(&reason, PFRES_SHORT);
            goto done;
        }
        action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
        if (action == PF_PASS) {
            if (pfsync_update_state_ptr != NULL)
                pfsync_update_state_ptr(s);
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
                &a, &ruleset, inp);
        break;
    }

    case IPPROTO_ICMP: {
        /* ICMPv4 does not belong in an IPv6 packet. */
        action = PF_DROP;
        DPFPRINTF(PF_DEBUG_MISC,
            ("pf: dropping IPv6 packet with ICMPv4 payload\n"));
        goto done;
    }

    case IPPROTO_ICMPV6: {
        struct icmp6_hdr ih;

        pd.hdr.icmp6 = &ih;
        if (!pf_pull_hdr(m, off, &ih, sizeof(ih),
            &action, &reason, AF_INET6)) {
            log = action != PF_PASS;
            goto done;
        }
        action = pf_test_state_icmp(&s, dir, kif,
            m, off, h, &pd, &reason);
        if (action == PF_PASS) {
            if (pfsync_update_state_ptr != NULL)
                pfsync_update_state_ptr(s);
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
                &a, &ruleset, inp);
        break;
    }

    default:
        action = pf_test_state_other(&s, dir, kif, m, &pd);
        if (action == PF_PASS) {
            if (pfsync_update_state_ptr != NULL)
                pfsync_update_state_ptr(s);
            r = s->rule.ptr;
            a = s->anchor.ptr;
            log = s->log;
        } else if (s == NULL)
            action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
                &a, &ruleset, inp);
        break;
    }

done:
    PF_RULES_RUNLOCK();
    if (n != m) {
        m_freem(n);
        n = NULL;
    }

    /* Handle dangerous IPv6 extension headers. */
    if (action == PF_PASS && rh_cnt &&
        !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_IPOPTIONS);
        log = 1;
        DPFPRINTF(PF_DEBUG_MISC,
            ("pf: dropping packet with dangerous v6 headers\n"));
    }

    if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) {
        action = PF_DROP;
        REASON_SET(&reason, PFRES_MEMORY);
    }
    if (r->rtableid >= 0)
        M_SETFIB(m, r->rtableid);

#ifdef ALTQ
    if (action == PF_PASS && r->qid) {
        if (pd.pf_mtag == NULL &&
            ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
            action = PF_DROP;
            REASON_SET(&reason, PFRES_MEMORY);
        } else {
            /* Only dereference the mtag once allocation succeeded. */
            if (pd.tos & IPTOS_LOWDELAY)
                pd.pf_mtag->qid = r->pqid;
            else
                pd.pf_mtag->qid = r->qid;
            /* Add hints for ecn. */
            pd.pf_mtag->hdr = h;
        }
    }
#endif /* ALTQ */

    if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP ||
        pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL &&
        (s->nat_rule.ptr->action == PF_RDR ||
        s->nat_rule.ptr->action == PF_BINAT) &&
        IN6_IS_ADDR_LOOPBACK(&pd.dst->v6))
        m->m_flags |= M_SKIP_FIREWALL;
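    /*
     * Illustrative note (added commentary, not in the original source):
     * the M_SKIP_FIREWALL flag set above keeps a packet that was just
     * redirected (rdr/binat) to a loopback address from being filtered
     * a second time when the stack re-examines it.  A hypothetical
     * pf.conf rule that would exercise this path might look like:
     *
     *    rdr pass on em0 inet6 proto tcp to port 8080 -> ::1 port 80
     */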
    /* XXX: Anybody working on it?! */
    if (r->divert.port)
        printf("pf: divert(9) is not supported for IPv6\n");

    if (log) {
        struct pf_rule *lr;

        if (s != NULL && s->nat_rule.ptr != NULL &&
            s->nat_rule.ptr->log & PF_LOG_ALL)
            lr = s->nat_rule.ptr;
        else
            lr = r;
        PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset,
            &pd, (s == NULL));
    }

    kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len;
    kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++;

    if (action == PF_PASS || r->action == PF_DROP) {
        dirndx = (dir == PF_OUT);
        r->packets[dirndx]++;
        r->bytes[dirndx] += pd.tot_len;
        if (a != NULL) {
            a->packets[dirndx]++;
            a->bytes[dirndx] += pd.tot_len;
        }
        if (s != NULL) {
            if (s->nat_rule.ptr != NULL) {
                s->nat_rule.ptr->packets[dirndx]++;
                s->nat_rule.ptr->bytes[dirndx] += pd.tot_len;
            }
            if (s->src_node != NULL) {
                s->src_node->packets[dirndx]++;
                s->src_node->bytes[dirndx] += pd.tot_len;
            }
            if (s->nat_src_node != NULL) {
                s->nat_src_node->packets[dirndx]++;
                s->nat_src_node->bytes[dirndx] += pd.tot_len;
            }
            dirndx = (dir == s->direction) ? 0 : 1;
            s->packets[dirndx]++;
            s->bytes[dirndx] += pd.tot_len;
        }
        tr = r;
        nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule;
        if (nr != NULL && r == &V_pf_default_rule)
            tr = nr;
        if (tr->src.addr.type == PF_ADDR_TABLE)
            pfr_update_stats(tr->src.addr.p.tbl,
                (s == NULL) ? pd.src :
                &s->key[(s->direction == PF_IN)]->addr[0],
                pd.af, pd.tot_len, dir == PF_OUT,
                r->action == PF_PASS, tr->src.neg);
        if (tr->dst.addr.type == PF_ADDR_TABLE)
            pfr_update_stats(tr->dst.addr.p.tbl,
                (s == NULL) ? pd.dst :
                &s->key[(s->direction == PF_IN)]->addr[1],
                pd.af, pd.tot_len, dir == PF_OUT,
                r->action == PF_PASS, tr->dst.neg);
    }

    switch (action) {
    case PF_SYNPROXY_DROP:
        m_freem(*m0);
        /* FALLTHROUGH */
    case PF_DEFER:
        *m0 = NULL;
        action = PF_PASS;
        break;
    default:
        /* pf_route6() returns unlocked. */
        if (r->rt) {
            pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd);
            return (action);
        }
        break;
    }

    if (s)
        PF_STATE_UNLOCK(s);

    return (action);
}
#endif /* INET6 */
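/*
 * Illustrative sketch (an assumption, not part of pf.c): hooking
 * pf_test6() into the AF_INET6 pfil(9) head.  The real registration is
 * performed by hook_pf() in pf_ioctl.c; pf_check6_in() and
 * pf_hook_inet6_example() below are hypothetical names, analogous to the
 * IPv4 sketch after pf_test().
 */
#if 0
static int
pf_check6_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
    int chk;

    /* Hypothetical inbound wrapper around pf_test6(). */
    chk = pf_test6(PF_IN, ifp, m, inp);
    if (chk && *m) {
        m_freem(*m);
        *m = NULL;
    }
    return (chk);
}

static void
pf_hook_inet6_example(void)
{
    struct pfil_head *pfh_inet6;

    /* Attach the wrapper to the AF_INET6 pfil head for inbound packets. */
    pfh_inet6 = pfil_head_get(PFIL_TYPE_AF, AF_INET6);
    if (pfh_inet6 != NULL)
        (void)pfil_add_hook(pf_check6_in, NULL,
            PFIL_IN | PFIL_WAITOK, pfh_inet6);
}
#endif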