/*	$OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $ */

/*
 * Copyright (c) 2001 Daniel Hartmeier
 * Copyright (c) 2002 - 2008 Henning Brauer
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 *
 */

#include <sys/cdefs.h>

__FBSDID("$FreeBSD: head/sys/netpfil/pf/pf.c 243941 2012-12-06 08:32:28Z glebius $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_bpf.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/hash.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/ucred.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/radix_mpath.h>
#include <net/vnet.h>

#include <net/pfvar.h>
#include <net/pf_mtag.h>
#include <net/if_pflog.h>
#include <net/if_pfsync.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#include <netpfil/ipfw/ip_fw_private.h> /* XXX: only for DIR_IN/DIR_OUT */

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif /* INET6 */

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

#define	DPFPRINTF(n, x)	if (V_pf_status.debug >= (n)) printf x

/*
 * Global variables
 */
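/*
 * Most of the globals below are per-vnet: VNET_DEFINE() gives each
 * virtual network stack its own instance of a variable, and the
 * V_-prefixed accessor macros resolve to the copy belonging to the
 * current vnet, so pf instances in separate jails/vnets stay fully
 * independent.
 */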
/* state tables */
VNET_DEFINE(struct pf_altqqueue,	pf_altqs[2]);
VNET_DEFINE(struct pf_palist,		pf_pabuf);
VNET_DEFINE(struct pf_altqqueue *,	pf_altqs_active);
VNET_DEFINE(struct pf_altqqueue *,	pf_altqs_inactive);
VNET_DEFINE(struct pf_status,		pf_status);

VNET_DEFINE(u_int32_t,			ticket_altqs_active);
VNET_DEFINE(u_int32_t,			ticket_altqs_inactive);
VNET_DEFINE(int,			altqs_inactive_open);
VNET_DEFINE(u_int32_t,			ticket_pabuf);

VNET_DEFINE(MD5_CTX,			pf_tcp_secret_ctx);
#define	V_pf_tcp_secret_ctx		VNET(pf_tcp_secret_ctx)
VNET_DEFINE(u_char,			pf_tcp_secret[16]);
#define	V_pf_tcp_secret			VNET(pf_tcp_secret)
VNET_DEFINE(int,			pf_tcp_secret_init);
#define	V_pf_tcp_secret_init		VNET(pf_tcp_secret_init)
VNET_DEFINE(int,			pf_tcp_iss_off);
#define	V_pf_tcp_iss_off		VNET(pf_tcp_iss_off)

/*
 * Queue for pf_intr() sends.
 */
static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
struct pf_send_entry {
	STAILQ_ENTRY(pf_send_entry)	pfse_next;
	struct mbuf			*pfse_m;
	enum {
		PFSE_IP,
		PFSE_IP6,
		PFSE_ICMP,
		PFSE_ICMP6,
	} pfse_type;
	union {
		struct route	ro;
		struct {
			int	type;
			int	code;
			int	mtu;
		} icmpopts;
	} u;
#define	pfse_ro		u.ro
#define	pfse_icmp_type	u.icmpopts.type
#define	pfse_icmp_code	u.icmpopts.code
#define	pfse_icmp_mtu	u.icmpopts.mtu
};

STAILQ_HEAD(pf_send_head, pf_send_entry);
static VNET_DEFINE(struct pf_send_head, pf_sendqueue);
#define	V_pf_sendqueue	VNET(pf_sendqueue)

static struct mtx pf_sendqueue_mtx;
#define	PF_SENDQ_LOCK()		mtx_lock(&pf_sendqueue_mtx)
#define	PF_SENDQ_UNLOCK()	mtx_unlock(&pf_sendqueue_mtx)

/*
 * Queue for pf_overload_task() tasks.
 */
struct pf_overload_entry {
	SLIST_ENTRY(pf_overload_entry)	next;
	struct pf_addr			addr;
	sa_family_t			af;
	uint8_t				dir;
	struct pf_rule			*rule;
};

SLIST_HEAD(pf_overload_head, pf_overload_entry);
static VNET_DEFINE(struct pf_overload_head, pf_overloadqueue);
#define	V_pf_overloadqueue	VNET(pf_overloadqueue)
static VNET_DEFINE(struct task, pf_overloadtask);
#define	V_pf_overloadtask	VNET(pf_overloadtask)

static struct mtx pf_overloadqueue_mtx;
#define	PF_OVERLOADQ_LOCK()	mtx_lock(&pf_overloadqueue_mtx)
#define	PF_OVERLOADQ_UNLOCK()	mtx_unlock(&pf_overloadqueue_mtx)

VNET_DEFINE(struct pf_rulequeue, pf_unlinked_rules);
struct mtx pf_unlnkdrules_mtx;

static VNET_DEFINE(uma_zone_t, pf_sources_z);
#define	V_pf_sources_z	VNET(pf_sources_z)
static VNET_DEFINE(uma_zone_t, pf_mtag_z);
#define	V_pf_mtag_z	VNET(pf_mtag_z)
VNET_DEFINE(uma_zone_t,	pf_state_z);
VNET_DEFINE(uma_zone_t,	pf_state_key_z);

VNET_DEFINE(uint64_t, pf_stateid[MAXCPU]);
#define	PFID_CPUBITS	8
#define	PFID_CPUSHIFT	(sizeof(uint64_t) * NBBY - PFID_CPUBITS)
#define	PFID_CPUMASK	((uint64_t)((1 << PFID_CPUBITS) - 1) << PFID_CPUSHIFT)
#define	PFID_MAXID	(~PFID_CPUMASK)
CTASSERT((1 << PFID_CPUBITS) > MAXCPU);
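/*
 * A note on the scheme above: state IDs are 64 bits wide, with the
 * top PFID_CPUBITS holding the CPU that allocated the ID and the
 * remaining bits a per-CPU counter (pf_stateid[]).  Each CPU can
 * therefore hand out unique IDs without synchronization; the
 * CTASSERT guarantees that every possible CPU number fits into the
 * CPU field.
 */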
static void		 pf_src_tree_remove_state(struct pf_state *);
static void		 pf_init_threshold(struct pf_threshold *, u_int32_t,
			    u_int32_t);
static void		 pf_add_threshold(struct pf_threshold *);
static int		 pf_check_threshold(struct pf_threshold *);

static void		 pf_change_ap(struct pf_addr *, u_int16_t *,
			    u_int16_t *, u_int16_t *, struct pf_addr *,
			    u_int16_t, u_int8_t, sa_family_t);
static int		 pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
			    struct tcphdr *, struct pf_state_peer *);
static void		 pf_change_icmp(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, struct pf_addr *, u_int16_t,
			    u_int16_t *, u_int16_t *, u_int16_t *,
			    u_int16_t *, u_int8_t, sa_family_t);
static void		 pf_send_tcp(struct mbuf *,
			    const struct pf_rule *, sa_family_t,
			    const struct pf_addr *, const struct pf_addr *,
			    u_int16_t, u_int16_t, u_int32_t, u_int32_t,
			    u_int8_t, u_int16_t, u_int16_t, u_int8_t, int,
			    u_int16_t, struct ifnet *);
static void		 pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
			    sa_family_t, struct pf_rule *);
static void		 pf_detach_state(struct pf_state *);
static int		 pf_state_key_attach(struct pf_state_key *,
			    struct pf_state_key *, struct pf_state *);
static void		 pf_state_key_detach(struct pf_state *, int);
static int		 pf_state_key_ctor(void *, int, void *, int);
static u_int32_t	 pf_tcp_iss(struct pf_pdesc *);
static int		 pf_test_rule(struct pf_rule **, struct pf_state **,
			    int, struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **, struct inpcb *);
static int		 pf_create_state(struct pf_rule *, struct pf_rule *,
			    struct pf_rule *, struct pf_pdesc *,
			    struct pf_src_node *, struct pf_state_key *,
			    struct pf_state_key *, struct mbuf *, int,
			    u_int16_t, u_int16_t, int *, struct pfi_kif *,
			    struct pf_state **, int, u_int16_t, u_int16_t,
			    int);
static int		 pf_test_fragment(struct pf_rule **, int,
			    struct pfi_kif *, struct mbuf *, void *,
			    struct pf_pdesc *, struct pf_rule **,
			    struct pf_ruleset **);
static int		 pf_tcp_track_full(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pfi_kif *, struct mbuf *, int,
			    struct pf_pdesc *, u_short *, int *);
static int		 pf_tcp_track_sloppy(struct pf_state_peer *,
			    struct pf_state_peer *, struct pf_state **,
			    struct pf_pdesc *, u_short *);
static int		 pf_test_state_tcp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_udp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *);
static int		 pf_test_state_icmp(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, int,
			    void *, struct pf_pdesc *, u_short *);
static int		 pf_test_state_other(struct pf_state **, int,
			    struct pfi_kif *, struct mbuf *, struct pf_pdesc *);
static u_int8_t		 pf_get_wscale(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_get_mss(struct mbuf *, int, u_int16_t,
			    sa_family_t);
static u_int16_t	 pf_calc_mss(struct pf_addr *, sa_family_t,
			    int, u_int16_t);
static void		 pf_set_rt_ifp(struct pf_state *,
			    struct pf_addr *);
static int		 pf_check_proto_cksum(struct mbuf *, int, int,
			    u_int8_t, sa_family_t);
static void		 pf_print_state_parts(struct pf_state *,
			    struct pf_state_key *, struct pf_state_key *);
static int		 pf_addr_wrap_neq(struct pf_addr_wrap *,
			    struct pf_addr_wrap *);
static struct pf_state	*pf_find_state(struct pfi_kif *,
			    struct pf_state_key_cmp *, u_int);
static int		 pf_src_connlimit(struct pf_state **);
static void		 pf_overload_task(void *c, int pending);
static int		 pf_insert_src_node(struct pf_src_node **,
			    struct pf_rule *, struct pf_addr *, sa_family_t);
static u_int		 pf_purge_expired_states(u_int, int);
static void		 pf_purge_unlinked_rules(void);
static int		 pf_mtag_init(void *, int, int);
static void		 pf_mtag_free(struct m_tag *);
#ifdef INET
static void		 pf_route(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET */
#ifdef INET6
static void		 pf_change_a6(struct pf_addr *, u_int16_t *,
			    struct pf_addr *, u_int8_t);
static void		 pf_route6(struct mbuf **, struct pf_rule *, int,
			    struct ifnet *, struct pf_state *,
			    struct pf_pdesc *);
#endif /* INET6 */

int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);

VNET_DECLARE(int, pf_end_threads);

VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);

#define	PACKET_LOOPED(pd)	((pd)->pf_mtag &&			\
				 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)

#define	STATE_LOOKUP(i, k, d, s, pd)					\
	do {								\
		(s) = pf_find_state((i), (k), (d));			\
		if ((s) == NULL || (s)->timeout == PFTM_PURGE)		\
			return (PF_DROP);				\
		if (PACKET_LOOPED(pd))					\
			return (PF_PASS);				\
		if ((d) == PF_OUT &&					\
		    (((s)->rule.ptr->rt == PF_ROUTETO &&		\
		    (s)->rule.ptr->direction == PF_OUT) ||		\
		    ((s)->rule.ptr->rt == PF_REPLYTO &&			\
		    (s)->rule.ptr->direction == PF_IN)) &&		\
		    (s)->rt_kif != NULL &&				\
		    (s)->rt_kif != (i))					\
			return (PF_PASS);				\
	} while (0)

#define	BOUND_IFACE(r, k) \
	((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all

#define	STATE_INC_COUNTERS(s)				\
	do {						\
		s->rule.ptr->states_cur++;		\
		s->rule.ptr->states_tot++;		\
		if (s->anchor.ptr != NULL) {		\
			s->anchor.ptr->states_cur++;	\
			s->anchor.ptr->states_tot++;	\
		}					\
		if (s->nat_rule.ptr != NULL) {		\
			s->nat_rule.ptr->states_cur++;	\
			s->nat_rule.ptr->states_tot++;	\
		}					\
	} while (0)

#define	STATE_DEC_COUNTERS(s)				\
	do {						\
		if (s->nat_rule.ptr != NULL)		\
			s->nat_rule.ptr->states_cur--;	\
		if (s->anchor.ptr != NULL)		\
			s->anchor.ptr->states_cur--;	\
		s->rule.ptr->states_cur--;		\
	} while (0)

static MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
VNET_DEFINE(struct pf_idhash *, pf_idhash);
VNET_DEFINE(u_long, pf_hashmask);
VNET_DEFINE(struct pf_srchash *, pf_srchash);
VNET_DEFINE(u_long, pf_srchashmask);

SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW, 0, "pf(4)");

VNET_DEFINE(u_long, pf_hashsize);
#define	V_pf_hashsize	VNET(pf_hashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_hashsize), 0, "Size of pf(4) states hashtable");

VNET_DEFINE(u_long, pf_srchashsize);
#define	V_pf_srchashsize	VNET(pf_srchashsize)
SYSCTL_VNET_UINT(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
    &VNET_NAME(pf_srchashsize), 0, "Size of pf(4) source nodes hashtable");

VNET_DEFINE(void *, pf_swi_cookie);

VNET_DEFINE(uint32_t, pf_hashseed);
#define	V_pf_hashseed	VNET(pf_hashseed)

static __inline uint32_t
pf_hashkey(struct pf_state_key *sk)
{
	uint32_t h;

	h = jenkins_hash32((uint32_t *)sk,
	    sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
	    V_pf_hashseed);

	return (h & V_pf_hashmask);
}

static __inline uint32_t
pf_hashsrc(struct pf_addr *addr, sa_family_t af)
{
	uint32_t h;

	switch (af) {
	case AF_INET:
		h = jenkins_hash32((uint32_t *)&addr->v4,
		    sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
		break;
	case AF_INET6:
		h = jenkins_hash32((uint32_t *)&addr->v6,
		    sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
		break;
	default:
		panic("%s: unknown address family %u", __func__, af);
	}

	return (h & V_pf_srchashmask);
}

#ifdef INET6
void
pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
{
	switch (af) {
#ifdef INET
	case AF_INET:
		dst->addr32[0] = src->addr32[0];
		break;
#endif /* INET */
	case AF_INET6:
		dst->addr32[0] = src->addr32[0];
		dst->addr32[1] = src->addr32[1];
		dst->addr32[2] = src->addr32[2];
		dst->addr32[3] = src->addr32[3];
		break;
	}
}
#endif /* INET6 */
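/*
 * The connection-rate thresholds below are kept in fixed point,
 * scaled by PF_THRESHOLD_MULT, so that the decay can be computed in
 * integer arithmetic: on every event pf_add_threshold() ages the
 * accumulated count linearly by the fraction of the window that has
 * elapsed and then adds one event (PF_THRESHOLD_MULT).
 * pf_check_threshold() trips once the decayed count exceeds
 * limit * PF_THRESHOLD_MULT.
 */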
static void
pf_init_threshold(struct pf_threshold *threshold,
    u_int32_t limit, u_int32_t seconds)
{
	threshold->limit = limit * PF_THRESHOLD_MULT;
	threshold->seconds = seconds;
	threshold->count = 0;
	threshold->last = time_uptime;
}

static void
pf_add_threshold(struct pf_threshold *threshold)
{
	u_int32_t t = time_uptime, diff = t - threshold->last;

	if (diff >= threshold->seconds)
		threshold->count = 0;
	else
		threshold->count -= threshold->count * diff /
		    threshold->seconds;
	threshold->count += PF_THRESHOLD_MULT;
	threshold->last = t;
}

static int
pf_check_threshold(struct pf_threshold *threshold)
{
	return (threshold->count > threshold->limit);
}

static int
pf_src_connlimit(struct pf_state **state)
{
	struct pf_overload_entry *pfoe;
	int bad = 0;

	PF_STATE_LOCK_ASSERT(*state);

	(*state)->src_node->conn++;
	(*state)->src.tcp_est = 1;
	pf_add_threshold(&(*state)->src_node->conn_rate);

	if ((*state)->rule.ptr->max_src_conn &&
	    (*state)->rule.ptr->max_src_conn <
	    (*state)->src_node->conn) {
		V_pf_status.lcounters[LCNT_SRCCONN]++;
		bad++;
	}

	if ((*state)->rule.ptr->max_src_conn_rate.limit &&
	    pf_check_threshold(&(*state)->src_node->conn_rate)) {
		V_pf_status.lcounters[LCNT_SRCCONNRATE]++;
		bad++;
	}

	if (!bad)
		return (0);

	/* Kill this state. */
	(*state)->timeout = PFTM_PURGE;
	(*state)->src.state = (*state)->dst.state = TCPS_CLOSED;

	if ((*state)->rule.ptr->overload_tbl == NULL)
		return (1);

	/* Schedule overloading and flushing task. */
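	/*
	 * The actual table insertion and state flushing are deferred
	 * to pf_overload_task(): they need the rules write lock and a
	 * walk over every ID hash row, which we cannot safely do here
	 * while holding this state's hash row lock.
	 */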
	pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
	if (pfoe == NULL)
		return (1);	/* too bad :( */

	bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
	pfoe->af = (*state)->key[PF_SK_WIRE]->af;
	pfoe->rule = (*state)->rule.ptr;
	pfoe->dir = (*state)->direction;
	PF_OVERLOADQ_LOCK();
	SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
	PF_OVERLOADQ_UNLOCK();
	taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);

	return (1);
}

static void
pf_overload_task(void *c, int pending)
{
	struct pf_overload_head queue;
	struct pfr_addr p;
	struct pf_overload_entry *pfoe, *pfoe1;
	uint32_t killed = 0;

	PF_OVERLOADQ_LOCK();
	queue = *(struct pf_overload_head *)c;
	SLIST_INIT((struct pf_overload_head *)c);
	PF_OVERLOADQ_UNLOCK();

	bzero(&p, sizeof(p));
	SLIST_FOREACH(pfoe, &queue, next) {
		V_pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			printf("%s: blocking address ", __func__);
			pf_print_host(&pfoe->addr, 0, pfoe->af);
			printf("\n");
		}

		p.pfra_af = pfoe->af;
		switch (pfoe->af) {
#ifdef INET
		case AF_INET:
			p.pfra_net = 32;
			p.pfra_ip4addr = pfoe->addr.v4;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			p.pfra_net = 128;
			p.pfra_ip6addr = pfoe->addr.v6;
			break;
#endif
		}

		PF_RULES_WLOCK();
		pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
		PF_RULES_WUNLOCK();
	}

	/*
	 * Remove those entries that don't need flushing.
	 */
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		if (pfoe->rule->flush == 0) {
			SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
			free(pfoe, M_PFTEMP);
		} else
			V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;

	/* If nothing to flush, return. */
	if (SLIST_EMPTY(&queue))
		return;

	for (int i = 0; i <= V_pf_hashmask; i++) {
		struct pf_idhash *ih = &V_pf_idhash[i];
		struct pf_state_key *sk;
		struct pf_state *s;

		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			sk = s->key[PF_SK_WIRE];
			SLIST_FOREACH(pfoe, &queue, next)
				if (sk->af == pfoe->af &&
				    ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
				    pfoe->rule == s->rule.ptr) &&
				    ((pfoe->dir == PF_OUT &&
				    PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
				    (pfoe->dir == PF_IN &&
				    PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
					s->timeout = PFTM_PURGE;
					s->src.state = s->dst.state = TCPS_CLOSED;
					killed++;
				}
		}
		PF_HASHROW_UNLOCK(ih);
	}
	SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
		free(pfoe, M_PFTEMP);
	if (V_pf_status.debug >= PF_DEBUG_MISC)
		printf("%s: %u states killed", __func__, killed);
}
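/*
 * pf_overload_task() above runs in two phases: first, every queued
 * address is inserted into its rule's overload table under the rules
 * write lock; then, for entries whose rule also requests a flush, the
 * whole ID hash is walked and every state whose wire key matches an
 * offending address (which of the two key addresses is compared
 * depends on the state's direction) is marked for purging by setting
 * its timeout to PFTM_PURGE.
 */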
602 */ 603struct pf_src_node * 604pf_find_src_node(struct pf_addr *src, struct pf_rule *rule, sa_family_t af, 605 int returnlocked) 606{ 607 struct pf_srchash *sh; 608 struct pf_src_node *n; 609 610 V_pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; 611 612 sh = &V_pf_srchash[pf_hashsrc(src, af)]; 613 PF_HASHROW_LOCK(sh); 614 LIST_FOREACH(n, &sh->nodes, entry) 615 if (n->rule.ptr == rule && n->af == af && 616 ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) || 617 (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0))) 618 break; 619 if (n != NULL || returnlocked == 0) 620 PF_HASHROW_UNLOCK(sh); 621 622 return (n); 623} 624 625static int 626pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, 627 struct pf_addr *src, sa_family_t af) 628{ 629 630 KASSERT((rule->rule_flag & PFRULE_RULESRCTRACK || 631 rule->rpool.opts & PF_POOL_STICKYADDR), 632 ("%s for non-tracking rule %p", __func__, rule)); 633 634 if (*sn == NULL) 635 *sn = pf_find_src_node(src, rule, af, 1); 636 637 if (*sn == NULL) { 638 struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)]; 639 640 PF_HASHROW_ASSERT(sh); 641 642 if (!rule->max_src_nodes || 643 rule->src_nodes < rule->max_src_nodes) 644 (*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO); 645 else 646 V_pf_status.lcounters[LCNT_SRCNODES]++; 647 if ((*sn) == NULL) { 648 PF_HASHROW_UNLOCK(sh); 649 return (-1); 650 } 651 652 pf_init_threshold(&(*sn)->conn_rate, 653 rule->max_src_conn_rate.limit, 654 rule->max_src_conn_rate.seconds); 655 656 (*sn)->af = af; 657 (*sn)->rule.ptr = rule; 658 PF_ACPY(&(*sn)->addr, src, af); 659 LIST_INSERT_HEAD(&sh->nodes, *sn, entry); 660 (*sn)->creation = time_uptime; 661 (*sn)->ruletype = rule->action; 662 if ((*sn)->rule.ptr != NULL) 663 (*sn)->rule.ptr->src_nodes++; 664 PF_HASHROW_UNLOCK(sh); 665 V_pf_status.scounters[SCNT_SRC_NODE_INSERT]++; 666 V_pf_status.src_nodes++; 667 } else { 668 if (rule->max_src_states && 669 (*sn)->states >= rule->max_src_states) { 670 V_pf_status.lcounters[LCNT_SRCSTATES]++; 671 return (-1); 672 } 673 } 674 return (0); 675} 676 677static void 678pf_remove_src_node(struct pf_src_node *src) 679{ 680 struct pf_srchash *sh; 681 682 sh = &V_pf_srchash[pf_hashsrc(&src->addr, src->af)]; 683 PF_HASHROW_LOCK(sh); 684 LIST_REMOVE(src, entry); 685 PF_HASHROW_UNLOCK(sh); 686 687 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 688 V_pf_status.src_nodes--; 689 690 uma_zfree(V_pf_sources_z, src); 691} 692 693/* Data storage structures initialization. */ 694void 695pf_initialize() 696{ 697 struct pf_keyhash *kh; 698 struct pf_idhash *ih; 699 struct pf_srchash *sh; 700 u_int i; 701 702 TUNABLE_ULONG_FETCH("net.pf.states_hashsize", &V_pf_hashsize); 703 if (V_pf_hashsize == 0 || !powerof2(V_pf_hashsize)) 704 V_pf_hashsize = PF_HASHSIZ; 705 TUNABLE_ULONG_FETCH("net.pf.source_nodes_hashsize", &V_pf_srchashsize); 706 if (V_pf_srchashsize == 0 || !powerof2(V_pf_srchashsize)) 707 V_pf_srchashsize = PF_HASHSIZ / 4; 708 709 V_pf_hashseed = arc4random(); 710 711 /* States and state keys storage. 
	V_pf_hashseed = arc4random();

	/* States and state keys storage. */
	V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_state),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
	uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);

	V_pf_state_key_z = uma_zcreate("pf state keys",
	    sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	V_pf_keyhash = malloc(V_pf_hashsize * sizeof(struct pf_keyhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_idhash = malloc(V_pf_hashsize * sizeof(struct pf_idhash),
	    M_PFHASH, M_WAITOK | M_ZERO);
	V_pf_hashmask = V_pf_hashsize - 1;
	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF);
		mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
	}

	/* Source nodes. */
	V_pf_sources_z = uma_zcreate("pf source nodes",
	    sizeof(struct pf_src_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    0);
	V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
	uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
	V_pf_srchash = malloc(V_pf_srchashsize * sizeof(struct pf_srchash),
	    M_PFHASH, M_WAITOK|M_ZERO);
	V_pf_srchashmask = V_pf_srchashsize - 1;
	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++)
		mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);

	/* ALTQ */
	TAILQ_INIT(&V_pf_altqs[0]);
	TAILQ_INIT(&V_pf_altqs[1]);
	TAILQ_INIT(&V_pf_pabuf);
	V_pf_altqs_active = &V_pf_altqs[0];
	V_pf_altqs_inactive = &V_pf_altqs[1];

	/* Mbuf tags */
	V_pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
	    sizeof(struct pf_mtag), NULL, NULL, pf_mtag_init, NULL,
	    UMA_ALIGN_PTR, 0);
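	/*
	 * The mtag zone packs the m_tag header and struct pf_mtag
	 * into a single allocation; pf_get_mtag() below hands out a
	 * pointer just past the tag header (mtag + 1).
	 */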
	/* Send & overload+flush queues. */
	STAILQ_INIT(&V_pf_sendqueue);
	SLIST_INIT(&V_pf_overloadqueue);
	TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, &V_pf_overloadqueue);
	mtx_init(&pf_sendqueue_mtx, "pf send queue", NULL, MTX_DEF);
	mtx_init(&pf_overloadqueue_mtx, "pf overload/flush queue", NULL,
	    MTX_DEF);

	/* Unlinked, but possibly still referenced rules. */
	TAILQ_INIT(&V_pf_unlinked_rules);
	mtx_init(&pf_unlnkdrules_mtx, "pf unlinked rules", NULL, MTX_DEF);
}

void
pf_cleanup()
{
	struct pf_keyhash	*kh;
	struct pf_idhash	*ih;
	struct pf_srchash	*sh;
	struct pf_send_entry	*pfse, *next;
	u_int i;

	for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= V_pf_hashmask;
	    i++, kh++, ih++) {
		KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
		    __func__));
		KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
		    __func__));
		mtx_destroy(&kh->lock);
		mtx_destroy(&ih->lock);
	}
	free(V_pf_keyhash, M_PFHASH);
	free(V_pf_idhash, M_PFHASH);

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		KASSERT(LIST_EMPTY(&sh->nodes),
		    ("%s: source node hash not empty", __func__));
		mtx_destroy(&sh->lock);
	}
	free(V_pf_srchash, M_PFHASH);

	STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
		m_freem(pfse->pfse_m);
		free(pfse, M_PFTEMP);
	}

	mtx_destroy(&pf_sendqueue_mtx);
	mtx_destroy(&pf_overloadqueue_mtx);
	mtx_destroy(&pf_unlnkdrules_mtx);

	uma_zdestroy(V_pf_mtag_z);
	uma_zdestroy(V_pf_sources_z);
	uma_zdestroy(V_pf_state_z);
	uma_zdestroy(V_pf_state_key_z);
}

static int
pf_mtag_init(void *mem, int size, int how)
{
	struct m_tag *t;

	t = (struct m_tag *)mem;
	t->m_tag_cookie = MTAG_ABI_COMPAT;
	t->m_tag_id = PACKET_TAG_PF;
	t->m_tag_len = sizeof(struct pf_mtag);
	t->m_tag_free = pf_mtag_free;

	return (0);
}

static void
pf_mtag_free(struct m_tag *t)
{

	uma_zfree(V_pf_mtag_z, t);
}

struct pf_mtag *
pf_get_mtag(struct mbuf *m)
{
	struct m_tag *mtag;

	if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
		return ((struct pf_mtag *)(mtag + 1));

	mtag = uma_zalloc(V_pf_mtag_z, M_NOWAIT);
	if (mtag == NULL)
		return (NULL);
	bzero(mtag + 1, sizeof(struct pf_mtag));
	m_tag_prepend(m, mtag);

	return ((struct pf_mtag *)(mtag + 1));
}

static int
pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
    struct pf_state *s)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk, *cur;
	struct pf_state		*si, *olds = NULL;
	int idx;

	KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
	KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
	KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));

	/*
	 * First run: start with wire key.
	 */
	sk = skw;
	idx = PF_SK_WIRE;

keyattach:
	kh = &V_pf_keyhash[pf_hashkey(sk)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(cur, &kh->keys, entry)
		if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
			break;

	if (cur != NULL) {
		/*
		 * Key exists. Check for a state with the same kif;
		 * if there is none, attach ourselves to the key.
		 */
		TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
			struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];

			PF_HASHROW_LOCK(ih);
			if (si->kif == s->kif &&
			    si->direction == s->direction) {
				if (sk->proto == IPPROTO_TCP &&
				    si->src.state >= TCPS_FIN_WAIT_2 &&
				    si->dst.state >= TCPS_FIN_WAIT_2) {
					si->src.state = si->dst.state =
					    TCPS_CLOSED;
					/* Unlink later or cur can go away. */
					pf_ref_state(si);
					olds = si;
				} else {
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						printf("pf: %s key attach "
						    "failed on %s: ",
						    (idx == PF_SK_WIRE) ?
						    "wire" : "stack",
						    s->kif->pfik_name);
						pf_print_state_parts(s,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf(", existing: ");
						pf_print_state_parts(si,
						    (idx == PF_SK_WIRE) ?
						    sk : NULL,
						    (idx == PF_SK_STACK) ?
						    sk : NULL);
						printf("\n");
					}
					PF_HASHROW_UNLOCK(ih);
					PF_HASHROW_UNLOCK(kh);
					uma_zfree(V_pf_state_key_z, sk);
					if (idx == PF_SK_STACK)
						pf_detach_state(s);
					return (-1);	/* collision! */
				}
			}
			PF_HASHROW_UNLOCK(ih);
		}
		uma_zfree(V_pf_state_key_z, sk);
		s->key[idx] = cur;
	} else {
		LIST_INSERT_HEAD(&kh->keys, sk, entry);
		s->key[idx] = sk;
	}

stateattach:
	/* List is sorted, if-bound states before floating. */
	if (s->kif == V_pfi_all)
		TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
	else
		TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);

	/*
	 * Attach done. Now see whether we should attach a second
	 * key, and how.
	 */
	if (sks == skw) {
		s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
		idx = PF_SK_STACK;
		sks = NULL;
		goto stateattach;
	} else if (sks != NULL) {
		PF_HASHROW_UNLOCK(kh);
		if (olds) {
			pf_unlink_state(olds, 0);
			pf_release_state(olds);
			olds = NULL;
		}
		/*
		 * Continue attaching with stack key.
		 */
		sk = sks;
		idx = PF_SK_STACK;
		sks = NULL;
		goto keyattach;
	} else
		PF_HASHROW_UNLOCK(kh);

	if (olds) {
		pf_unlink_state(olds, 0);
		pf_release_state(olds);
	}

	KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
	    ("%s failure", __func__));

	return (0);
}

static void
pf_detach_state(struct pf_state *s)
{
	struct pf_state_key *sks = s->key[PF_SK_STACK];
	struct pf_keyhash *kh;

	if (sks != NULL) {
		kh = &V_pf_keyhash[pf_hashkey(sks)];
		PF_HASHROW_LOCK(kh);
		if (s->key[PF_SK_STACK] != NULL)
			pf_state_key_detach(s, PF_SK_STACK);
		/*
		 * If both point to same key, then we are done.
		 */
986 */ 987 if (sks == s->key[PF_SK_WIRE]) { 988 pf_state_key_detach(s, PF_SK_WIRE); 989 PF_HASHROW_UNLOCK(kh); 990 return; 991 } 992 PF_HASHROW_UNLOCK(kh); 993 } 994 995 if (s->key[PF_SK_WIRE] != NULL) { 996 kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])]; 997 PF_HASHROW_LOCK(kh); 998 if (s->key[PF_SK_WIRE] != NULL) 999 pf_state_key_detach(s, PF_SK_WIRE); 1000 PF_HASHROW_UNLOCK(kh); 1001 } 1002} 1003 1004static void 1005pf_state_key_detach(struct pf_state *s, int idx) 1006{ 1007 struct pf_state_key *sk = s->key[idx]; 1008#ifdef INVARIANTS 1009 struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)]; 1010 1011 PF_HASHROW_ASSERT(kh); 1012#endif 1013 TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]); 1014 s->key[idx] = NULL; 1015 1016 if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) { 1017 LIST_REMOVE(sk, entry); 1018 uma_zfree(V_pf_state_key_z, sk); 1019 } 1020} 1021 1022static int 1023pf_state_key_ctor(void *mem, int size, void *arg, int flags) 1024{ 1025 struct pf_state_key *sk = mem; 1026 1027 bzero(sk, sizeof(struct pf_state_key_cmp)); 1028 TAILQ_INIT(&sk->states[PF_SK_WIRE]); 1029 TAILQ_INIT(&sk->states[PF_SK_STACK]); 1030 1031 return (0); 1032} 1033 1034struct pf_state_key * 1035pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr, 1036 struct pf_addr *daddr, u_int16_t sport, u_int16_t dport) 1037{ 1038 struct pf_state_key *sk; 1039 1040 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); 1041 if (sk == NULL) 1042 return (NULL); 1043 1044 PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af); 1045 PF_ACPY(&sk->addr[pd->didx], daddr, pd->af); 1046 sk->port[pd->sidx] = sport; 1047 sk->port[pd->didx] = dport; 1048 sk->proto = pd->proto; 1049 sk->af = pd->af; 1050 1051 return (sk); 1052} 1053 1054struct pf_state_key * 1055pf_state_key_clone(struct pf_state_key *orig) 1056{ 1057 struct pf_state_key *sk; 1058 1059 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT); 1060 if (sk == NULL) 1061 return (NULL); 1062 1063 bcopy(orig, sk, sizeof(struct pf_state_key_cmp)); 1064 1065 return (sk); 1066} 1067 1068int 1069pf_state_insert(struct pfi_kif *kif, struct pf_state_key *skw, 1070 struct pf_state_key *sks, struct pf_state *s) 1071{ 1072 struct pf_idhash *ih; 1073 struct pf_state *cur; 1074 1075 KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]), 1076 ("%s: sks not pristine", __func__)); 1077 KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]), 1078 ("%s: skw not pristine", __func__)); 1079 KASSERT(s->refs == 0, ("%s: state not pristine", __func__)); 1080 1081 s->kif = kif; 1082 1083 if (pf_state_key_attach(skw, sks, s)) 1084 return (-1); 1085 1086 if (s->id == 0 && s->creatorid == 0) { 1087 /* XXX: should be atomic, but probability of collision low */ 1088 if ((s->id = V_pf_stateid[curcpu]++) == PFID_MAXID) 1089 V_pf_stateid[curcpu] = 1; 1090 s->id |= (uint64_t )curcpu << PFID_CPUSHIFT; 1091 s->id = htobe64(s->id); 1092 s->creatorid = V_pf_status.hostid; 1093 } 1094 1095 ih = &V_pf_idhash[PF_IDHASH(s)]; 1096 PF_HASHROW_LOCK(ih); 1097 LIST_FOREACH(cur, &ih->states, entry) 1098 if (cur->id == s->id && cur->creatorid == s->creatorid) 1099 break; 1100 1101 if (cur != NULL) { 1102 PF_HASHROW_UNLOCK(ih); 1103 if (V_pf_status.debug >= PF_DEBUG_MISC) { 1104 printf("pf: state insert failed: " 1105 "id: %016llx creatorid: %08x", 1106 (unsigned long long)be64toh(s->id), 1107 ntohl(s->creatorid)); 1108 printf("\n"); 1109 } 1110 pf_detach_state(s); 1111 return (-1); 1112 } 1113 LIST_INSERT_HEAD(&ih->states, s, entry); 1114 /* One for keys, one for ID hash. 
/*
 * Find state by ID: returns with locked row on success.
 */
struct pf_state *
pf_find_state_byid(uint64_t id, uint32_t creatorid)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	ih = &V_pf_idhash[(be64toh(id) % (V_pf_hashmask + 1))];

	PF_HASHROW_LOCK(ih);
	LIST_FOREACH(s, &ih->states, entry)
		if (s->id == id && s->creatorid == creatorid)
			break;

	if (s == NULL)
		PF_HASHROW_UNLOCK(ih);

	return (s);
}

/*
 * Find state by key.
 * Returns with ID hash slot locked on success.
 */
static struct pf_state *
pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir)
{
	struct pf_keyhash	*kh;
	struct pf_state_key	*sk;
	struct pf_state		*s;
	int idx;

	V_pf_status.fcounters[FCNT_STATE_SEARCH]++;

	kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];

	PF_HASHROW_LOCK(kh);
	LIST_FOREACH(sk, &kh->keys, entry)
		if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
			break;
	if (sk == NULL) {
		PF_HASHROW_UNLOCK(kh);
		return (NULL);
	}

	idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);

	/* List is sorted, if-bound states before floating ones. */
	TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
		if (s->kif == V_pfi_all || s->kif == kif) {
			PF_STATE_LOCK(s);
			PF_HASHROW_UNLOCK(kh);
			if (s->timeout == PFTM_UNLINKED) {
				/*
				 * State is being processed
				 * by pf_unlink_state() in
				 * another thread.
				 */
1186 */ 1187 PF_STATE_UNLOCK(s); 1188 return (NULL); 1189 } 1190 return (s); 1191 } 1192 PF_HASHROW_UNLOCK(kh); 1193 1194 return (NULL); 1195} 1196 1197struct pf_state * 1198pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more) 1199{ 1200 struct pf_keyhash *kh; 1201 struct pf_state_key *sk; 1202 struct pf_state *s, *ret = NULL; 1203 int idx, inout = 0; 1204 1205 V_pf_status.fcounters[FCNT_STATE_SEARCH]++; 1206 1207 kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)]; 1208 1209 PF_HASHROW_LOCK(kh); 1210 LIST_FOREACH(sk, &kh->keys, entry) 1211 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0) 1212 break; 1213 if (sk == NULL) { 1214 PF_HASHROW_UNLOCK(kh); 1215 return (NULL); 1216 } 1217 switch (dir) { 1218 case PF_IN: 1219 idx = PF_SK_WIRE; 1220 break; 1221 case PF_OUT: 1222 idx = PF_SK_STACK; 1223 break; 1224 case PF_INOUT: 1225 idx = PF_SK_WIRE; 1226 inout = 1; 1227 break; 1228 default: 1229 panic("%s: dir %u", __func__, dir); 1230 } 1231second_run: 1232 TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) { 1233 if (more == NULL) { 1234 PF_HASHROW_UNLOCK(kh); 1235 return (s); 1236 } 1237 1238 if (ret) 1239 (*more)++; 1240 else 1241 ret = s; 1242 } 1243 if (inout == 1) { 1244 inout = 0; 1245 idx = PF_SK_STACK; 1246 goto second_run; 1247 } 1248 PF_HASHROW_UNLOCK(kh); 1249 1250 return (ret); 1251} 1252 1253/* END state table stuff */ 1254 1255static void 1256pf_send(struct pf_send_entry *pfse) 1257{ 1258 1259 PF_SENDQ_LOCK(); 1260 STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next); 1261 PF_SENDQ_UNLOCK(); 1262 swi_sched(V_pf_swi_cookie, 0); 1263} 1264 1265void 1266pf_intr(void *v) 1267{ 1268 struct pf_send_head queue; 1269 struct pf_send_entry *pfse, *next; 1270 1271 CURVNET_SET((struct vnet *)v); 1272 1273 PF_SENDQ_LOCK(); 1274 queue = V_pf_sendqueue; 1275 STAILQ_INIT(&V_pf_sendqueue); 1276 PF_SENDQ_UNLOCK(); 1277 1278 STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) { 1279 switch (pfse->pfse_type) { 1280#ifdef INET 1281 case PFSE_IP: 1282 ip_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL); 1283 break; 1284 case PFSE_ICMP: 1285 icmp_error(pfse->pfse_m, pfse->pfse_icmp_type, 1286 pfse->pfse_icmp_code, 0, pfse->pfse_icmp_mtu); 1287 break; 1288#endif /* INET */ 1289#ifdef INET6 1290 case PFSE_IP6: 1291 ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL, NULL, 1292 NULL); 1293 break; 1294 case PFSE_ICMP6: 1295 icmp6_error(pfse->pfse_m, pfse->pfse_icmp_type, 1296 pfse->pfse_icmp_code, pfse->pfse_icmp_mtu); 1297 break; 1298#endif /* INET6 */ 1299 default: 1300 panic("%s: unknown type", __func__); 1301 } 1302 free(pfse, M_PFTEMP); 1303 } 1304 CURVNET_RESTORE(); 1305} 1306 1307void 1308pf_purge_thread(void *v) 1309{ 1310 u_int idx = 0; 1311 1312 CURVNET_SET((struct vnet *)v); 1313 1314 for (;;) { 1315 PF_RULES_RLOCK(); 1316 rw_sleep(pf_purge_thread, &pf_rules_lock, 0, "pftm", hz / 10); 1317 1318 if (V_pf_end_threads) { 1319 /* 1320 * To cleanse up all kifs and rules we need 1321 * two runs: first one clears reference flags, 1322 * then pf_purge_expired_states() doesn't 1323 * raise them, and then second run frees. 1324 */ 1325 PF_RULES_RUNLOCK(); 1326 pf_purge_unlinked_rules(); 1327 pfi_kif_purge(); 1328 1329 /* 1330 * Now purge everything. 1331 */ 1332 pf_purge_expired_states(0, V_pf_hashmask); 1333 pf_purge_expired_fragments(); 1334 pf_purge_expired_src_nodes(); 1335 1336 /* 1337 * Now all kifs & rules should be unreferenced, 1338 * thus should be successfully freed. 
1339 */ 1340 pf_purge_unlinked_rules(); 1341 pfi_kif_purge(); 1342 1343 /* 1344 * Announce success and exit. 1345 */ 1346 PF_RULES_RLOCK(); 1347 V_pf_end_threads++; 1348 PF_RULES_RUNLOCK(); 1349 wakeup(pf_purge_thread); 1350 kproc_exit(0); 1351 } 1352 PF_RULES_RUNLOCK(); 1353 1354 /* Process 1/interval fraction of the state table every run. */ 1355 idx = pf_purge_expired_states(idx, V_pf_hashmask / 1356 (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10)); 1357 1358 /* Purge other expired types every PFTM_INTERVAL seconds. */ 1359 if (idx == 0) { 1360 /* 1361 * Order is important: 1362 * - states and src nodes reference rules 1363 * - states and rules reference kifs 1364 */ 1365 pf_purge_expired_fragments(); 1366 pf_purge_expired_src_nodes(); 1367 pf_purge_unlinked_rules(); 1368 pfi_kif_purge(); 1369 } 1370 } 1371 /* not reached */ 1372 CURVNET_RESTORE(); 1373} 1374 1375u_int32_t 1376pf_state_expires(const struct pf_state *state) 1377{ 1378 u_int32_t timeout; 1379 u_int32_t start; 1380 u_int32_t end; 1381 u_int32_t states; 1382 1383 /* handle all PFTM_* > PFTM_MAX here */ 1384 if (state->timeout == PFTM_PURGE) 1385 return (time_uptime); 1386 if (state->timeout == PFTM_UNTIL_PACKET) 1387 return (0); 1388 KASSERT(state->timeout != PFTM_UNLINKED, 1389 ("pf_state_expires: timeout == PFTM_UNLINKED")); 1390 KASSERT((state->timeout < PFTM_MAX), 1391 ("pf_state_expires: timeout > PFTM_MAX")); 1392 timeout = state->rule.ptr->timeout[state->timeout]; 1393 if (!timeout) 1394 timeout = V_pf_default_rule.timeout[state->timeout]; 1395 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START]; 1396 if (start) { 1397 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END]; 1398 states = state->rule.ptr->states_cur; /* XXXGL */ 1399 } else { 1400 start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START]; 1401 end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END]; 1402 states = V_pf_status.states; 1403 } 1404 if (end && states > start && start < end) { 1405 if (states < end) 1406 return (state->expire + timeout * (end - states) / 1407 (end - start)); 1408 else 1409 return (time_uptime); 1410 } 1411 return (state->expire + timeout); 1412} 1413 1414void 1415pf_purge_expired_src_nodes() 1416{ 1417 struct pf_srchash *sh; 1418 struct pf_src_node *cur, *next; 1419 int i; 1420 1421 for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) { 1422 PF_HASHROW_LOCK(sh); 1423 LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next) 1424 if (cur->states <= 0 && cur->expire <= time_uptime) { 1425 if (cur->rule.ptr != NULL) 1426 cur->rule.ptr->src_nodes--; 1427 LIST_REMOVE(cur, entry); 1428 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; 1429 V_pf_status.src_nodes--; 1430 uma_zfree(V_pf_sources_z, cur); 1431 } else if (cur->rule.ptr != NULL) 1432 cur->rule.ptr->rule_flag |= PFRULE_REFS; 1433 PF_HASHROW_UNLOCK(sh); 1434 } 1435} 1436 1437static void 1438pf_src_tree_remove_state(struct pf_state *s) 1439{ 1440 u_int32_t timeout; 1441 1442 if (s->src_node != NULL) { 1443 if (s->src.tcp_est) 1444 --s->src_node->conn; 1445 if (--s->src_node->states <= 0) { 1446 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE]; 1447 if (!timeout) 1448 timeout = 1449 V_pf_default_rule.timeout[PFTM_SRC_NODE]; 1450 s->src_node->expire = time_uptime + timeout; 1451 } 1452 } 1453 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) { 1454 if (--s->nat_src_node->states <= 0) { 1455 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE]; 1456 if (!timeout) 1457 timeout = 1458 V_pf_default_rule.timeout[PFTM_SRC_NODE]; 1459 s->nat_src_node->expire = time_uptime + timeout; 1460 
void
pf_purge_expired_src_nodes()
{
	struct pf_srchash	*sh;
	struct pf_src_node	*cur, *next;
	int i;

	for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask; i++, sh++) {
		PF_HASHROW_LOCK(sh);
		LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
			if (cur->states <= 0 && cur->expire <= time_uptime) {
				if (cur->rule.ptr != NULL)
					cur->rule.ptr->src_nodes--;
				LIST_REMOVE(cur, entry);
				V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
				V_pf_status.src_nodes--;
				uma_zfree(V_pf_sources_z, cur);
			} else if (cur->rule.ptr != NULL)
				cur->rule.ptr->rule_flag |= PFRULE_REFS;
		PF_HASHROW_UNLOCK(sh);
	}
}

static void
pf_src_tree_remove_state(struct pf_state *s)
{
	u_int32_t timeout;

	if (s->src_node != NULL) {
		if (s->src.tcp_est)
			--s->src_node->conn;
		if (--s->src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->src_node->expire = time_uptime + timeout;
		}
	}
	if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
		if (--s->nat_src_node->states <= 0) {
			timeout = s->rule.ptr->timeout[PFTM_SRC_NODE];
			if (!timeout)
				timeout =
				    V_pf_default_rule.timeout[PFTM_SRC_NODE];
			s->nat_src_node->expire = time_uptime + timeout;
		}
	}
	s->src_node = s->nat_src_node = NULL;
}

/*
 * Unlink and potentially free a state. Function may be
 * called with ID hash row locked, but always returns
 * unlocked, since it needs to go through key hash locking.
 */
int
pf_unlink_state(struct pf_state *s, u_int flags)
{
	struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];

	if ((flags & PF_ENTER_LOCKED) == 0)
		PF_HASHROW_LOCK(ih);
	else
		PF_HASHROW_ASSERT(ih);

	if (s->timeout == PFTM_UNLINKED) {
		/*
		 * State is being processed
		 * by pf_unlink_state() in
		 * another thread.
		 */
		PF_HASHROW_UNLOCK(ih);
		return (0);	/* XXXGL: undefined actually */
	}

	if (s->src.state == PF_TCPS_PROXY_DST) {
		/* XXX wire key the right one? */
		pf_send_tcp(NULL, s->rule.ptr, s->key[PF_SK_WIRE]->af,
		    &s->key[PF_SK_WIRE]->addr[1],
		    &s->key[PF_SK_WIRE]->addr[0],
		    s->key[PF_SK_WIRE]->port[1],
		    s->key[PF_SK_WIRE]->port[0],
		    s->src.seqhi, s->src.seqlo + 1,
		    TH_RST|TH_ACK, 0, 0, 0, 1, s->tag, NULL);
	}

	LIST_REMOVE(s, entry);
	pf_src_tree_remove_state(s);

	if (pfsync_delete_state_ptr != NULL)
		pfsync_delete_state_ptr(s);

	s->timeout = PFTM_UNLINKED;

	PF_HASHROW_UNLOCK(ih);

	pf_detach_state(s);
	refcount_release(&s->refs);

	return (pf_release_state(s));
}

void
pf_free_state(struct pf_state *cur)
{

	KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
	KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
	    cur->timeout));
	--cur->rule.ptr->states_cur;
	if (cur->nat_rule.ptr != NULL)
		--cur->nat_rule.ptr->states_cur;
	if (cur->anchor.ptr != NULL)
		--cur->anchor.ptr->states_cur;
	pf_normalize_tcp_cleanup(cur);
	uma_zfree(V_pf_state_z, cur);
	V_pf_status.fcounters[FCNT_STATE_REMOVALS]++;
}

/*
 * Called only from pf_purge_thread(), thus serialized.
 */
static u_int
pf_purge_expired_states(u_int i, int maxcheck)
{
	struct pf_idhash *ih;
	struct pf_state *s;

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	/*
	 * Go through hash and unlink states that expire now.
	 */
	while (maxcheck > 0) {

		ih = &V_pf_idhash[i];
relock:
		PF_HASHROW_LOCK(ih);
		LIST_FOREACH(s, &ih->states, entry) {
			if (pf_state_expires(s) <= time_uptime) {
				V_pf_status.states -=
				    pf_unlink_state(s, PF_ENTER_LOCKED);
				goto relock;
			}
			s->rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->nat_rule.ptr != NULL)
				s->nat_rule.ptr->rule_flag |= PFRULE_REFS;
			if (s->anchor.ptr != NULL)
				s->anchor.ptr->rule_flag |= PFRULE_REFS;
			s->kif->pfik_flags |= PFI_IFLAG_REFS;
			if (s->rt_kif)
				s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
		}
		PF_HASHROW_UNLOCK(ih);

		/* Return when we hit end of hash. */
		if (++i > V_pf_hashmask) {
			V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
			return (0);
		}

		maxcheck--;
	}

	V_pf_status.states = uma_zone_get_cur(V_pf_state_z);

	return (i);
}

static void
pf_purge_unlinked_rules()
{
	struct pf_rulequeue tmpq;
	struct pf_rule *r, *r1;

	/*
	 * If we have an overloading task pending, then we'd
	 * better skip purging this time. There is a tiny
	 * probability that the overloading task references
	 * an already unlinked rule.
	 */
	PF_OVERLOADQ_LOCK();
	if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
		PF_OVERLOADQ_UNLOCK();
		return;
	}
	PF_OVERLOADQ_UNLOCK();

	/*
	 * Do naive mark-and-sweep garbage collecting of old rules.
	 * Reference flag is raised by pf_purge_expired_states()
	 * and pf_purge_expired_src_nodes().
	 *
	 * To avoid a LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
	 * use a temporary queue.
	 */
	TAILQ_INIT(&tmpq);
	PF_UNLNKDRULES_LOCK();
	TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
		if (!(r->rule_flag & PFRULE_REFS)) {
			TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
			TAILQ_INSERT_TAIL(&tmpq, r, entries);
		} else
			r->rule_flag &= ~PFRULE_REFS;
	}
	PF_UNLNKDRULES_UNLOCK();

	if (!TAILQ_EMPTY(&tmpq)) {
		PF_RULES_WLOCK();
		TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
			TAILQ_REMOVE(&tmpq, r, entries);
			pf_free_rule(r);
		}
		PF_RULES_WUNLOCK();
	}
}
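/*
 * pf_print_host() below prints IPv4 addresses in dotted quad and
 * IPv6 addresses in compressed form: the first loop finds the
 * longest run of zero 16-bit groups, which is then abbreviated to
 * "::" while printing.
 */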
1595 */ 1596 PF_OVERLOADQ_LOCK(); 1597 if (!SLIST_EMPTY(&V_pf_overloadqueue)) { 1598 PF_OVERLOADQ_UNLOCK(); 1599 return; 1600 } 1601 PF_OVERLOADQ_UNLOCK(); 1602 1603 /* 1604 * Do naive mark-and-sweep garbage collecting of old rules. 1605 * Reference flag is raised by pf_purge_expired_states() 1606 * and pf_purge_expired_src_nodes(). 1607 * 1608 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK, 1609 * use a temporary queue. 1610 */ 1611 TAILQ_INIT(&tmpq); 1612 PF_UNLNKDRULES_LOCK(); 1613 TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) { 1614 if (!(r->rule_flag & PFRULE_REFS)) { 1615 TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries); 1616 TAILQ_INSERT_TAIL(&tmpq, r, entries); 1617 } else 1618 r->rule_flag &= ~PFRULE_REFS; 1619 } 1620 PF_UNLNKDRULES_UNLOCK(); 1621 1622 if (!TAILQ_EMPTY(&tmpq)) { 1623 PF_RULES_WLOCK(); 1624 TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) { 1625 TAILQ_REMOVE(&tmpq, r, entries); 1626 pf_free_rule(r); 1627 } 1628 PF_RULES_WUNLOCK(); 1629 } 1630} 1631 1632void 1633pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af) 1634{ 1635 switch (af) { 1636#ifdef INET 1637 case AF_INET: { 1638 u_int32_t a = ntohl(addr->addr32[0]); 1639 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255, 1640 (a>>8)&255, a&255); 1641 if (p) { 1642 p = ntohs(p); 1643 printf(":%u", p); 1644 } 1645 break; 1646 } 1647#endif /* INET */ 1648#ifdef INET6 1649 case AF_INET6: { 1650 u_int16_t b; 1651 u_int8_t i, curstart, curend, maxstart, maxend; 1652 curstart = curend = maxstart = maxend = 255; 1653 for (i = 0; i < 8; i++) { 1654 if (!addr->addr16[i]) { 1655 if (curstart == 255) 1656 curstart = i; 1657 curend = i; 1658 } else { 1659 if ((curend - curstart) > 1660 (maxend - maxstart)) { 1661 maxstart = curstart; 1662 maxend = curend; 1663 } 1664 curstart = curend = 255; 1665 } 1666 } 1667 if ((curend - curstart) > 1668 (maxend - maxstart)) { 1669 maxstart = curstart; 1670 maxend = curend; 1671 } 1672 for (i = 0; i < 8; i++) { 1673 if (i >= maxstart && i <= maxend) { 1674 if (i == 0) 1675 printf(":"); 1676 if (i == maxend) 1677 printf(":"); 1678 } else { 1679 b = ntohs(addr->addr16[i]); 1680 printf("%x", b); 1681 if (i < 7) 1682 printf(":"); 1683 } 1684 } 1685 if (p) { 1686 p = ntohs(p); 1687 printf("[%u]", p); 1688 } 1689 break; 1690 } 1691#endif /* INET6 */ 1692 } 1693} 1694 1695void 1696pf_print_state(struct pf_state *s) 1697{ 1698 pf_print_state_parts(s, NULL, NULL); 1699} 1700 1701static void 1702pf_print_state_parts(struct pf_state *s, 1703 struct pf_state_key *skwp, struct pf_state_key *sksp) 1704{ 1705 struct pf_state_key *skw, *sks; 1706 u_int8_t proto, dir; 1707 1708 /* Do our best to fill these, but they're skipped if NULL */ 1709 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL); 1710 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL); 1711 proto = skw ? skw->proto : (sks ? sks->proto : 0); 1712 dir = s ? 
static void
pf_print_state_parts(struct pf_state *s,
    struct pf_state_key *skwp, struct pf_state_key *sksp)
{
	struct pf_state_key *skw, *sks;
	u_int8_t proto, dir;

	/* Do our best to fill these, but they're skipped if NULL */
	skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
	sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
	proto = skw ? skw->proto : (sks ? sks->proto : 0);
	dir = s ? s->direction : 0;

	switch (proto) {
	case IPPROTO_IPV4:
		printf("IPv4");
		break;
	case IPPROTO_IPV6:
		printf("IPv6");
		break;
	case IPPROTO_TCP:
		printf("TCP");
		break;
	case IPPROTO_UDP:
		printf("UDP");
		break;
	case IPPROTO_ICMP:
		printf("ICMP");
		break;
	case IPPROTO_ICMPV6:
		printf("ICMPv6");
		break;
	default:
		printf("%u", skw->proto);
		break;
	}
	switch (dir) {
	case PF_IN:
		printf(" in");
		break;
	case PF_OUT:
		printf(" out");
		break;
	}
	if (skw) {
		printf(" wire: ");
		pf_print_host(&skw->addr[0], skw->port[0], skw->af);
		printf(" ");
		pf_print_host(&skw->addr[1], skw->port[1], skw->af);
	}
	if (sks) {
		printf(" stack: ");
		if (sks != skw) {
			pf_print_host(&sks->addr[0], sks->port[0], sks->af);
			printf(" ");
			pf_print_host(&sks->addr[1], sks->port[1], sks->af);
		} else
			printf("-");
	}
	if (s) {
		if (proto == IPPROTO_TCP) {
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->src.seqlo, s->src.seqhi,
			    s->src.max_win, s->src.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->src.wscale & PF_WSCALE_MASK);
			printf("]");
			printf(" [lo=%u high=%u win=%u modulator=%u",
			    s->dst.seqlo, s->dst.seqhi,
			    s->dst.max_win, s->dst.seqdiff);
			if (s->src.wscale && s->dst.wscale)
				printf(" wscale=%u",
				    s->dst.wscale & PF_WSCALE_MASK);
			printf("]");
		}
		printf(" %u:%u", s->src.state, s->dst.state);
	}
}

void
pf_print_flags(u_int8_t f)
{
	if (f)
		printf(" ");
	if (f & TH_FIN)
		printf("F");
	if (f & TH_SYN)
		printf("S");
	if (f & TH_RST)
		printf("R");
	if (f & TH_PUSH)
		printf("P");
	if (f & TH_ACK)
		printf("A");
	if (f & TH_URG)
		printf("U");
	if (f & TH_ECE)
		printf("E");
	if (f & TH_CWR)
		printf("W");
}
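/*
 * Skip steps, computed below, are pf's shortcut through large
 * rulesets: for each criterion (interface, direction, address
 * family, protocol, source/destination address and port) every rule
 * points at the next rule that differs in that criterion.  When a
 * criterion does not match a packet, the evaluation loop can jump
 * over the whole run of rules sharing that value instead of testing
 * each one.
 */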
#define	PF_SET_SKIP_STEPS(i)					\
	do {							\
		while (head[i] != cur) {			\
			head[i]->skip[i].ptr = cur;		\
			head[i] = TAILQ_NEXT(head[i], entries);	\
		}						\
	} while (0)

void
pf_calc_skip_steps(struct pf_rulequeue *rules)
{
	struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
	int i;

	cur = TAILQ_FIRST(rules);
	prev = cur;
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		head[i] = cur;
	while (cur != NULL) {

		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
			PF_SET_SKIP_STEPS(PF_SKIP_IFP);
		if (cur->direction != prev->direction)
			PF_SET_SKIP_STEPS(PF_SKIP_DIR);
		if (cur->af != prev->af)
			PF_SET_SKIP_STEPS(PF_SKIP_AF);
		if (cur->proto != prev->proto)
			PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
		if (cur->src.neg != prev->src.neg ||
		    pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
		if (cur->src.port[0] != prev->src.port[0] ||
		    cur->src.port[1] != prev->src.port[1] ||
		    cur->src.port_op != prev->src.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
		if (cur->dst.neg != prev->dst.neg ||
		    pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
			PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
		if (cur->dst.port[0] != prev->dst.port[0] ||
		    cur->dst.port[1] != prev->dst.port[1] ||
		    cur->dst.port_op != prev->dst.port_op)
			PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);

		prev = cur;
		cur = TAILQ_NEXT(cur, entries);
	}
	for (i = 0; i < PF_SKIP_COUNT; ++i)
		PF_SET_SKIP_STEPS(i);
}

static int
pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
{
	if (aw1->type != aw2->type)
		return (1);
	switch (aw1->type) {
	case PF_ADDR_ADDRMASK:
	case PF_ADDR_RANGE:
		if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0))
			return (1);
		if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0))
			return (1);
		return (0);
	case PF_ADDR_DYNIFTL:
		return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
	case PF_ADDR_NOROUTE:
	case PF_ADDR_URPFFAILED:
		return (0);
	case PF_ADDR_TABLE:
		return (aw1->p.tbl != aw2->p.tbl);
	default:
		printf("invalid address type: %d\n", aw1->type);
		return (1);
	}
}

u_int16_t
pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
{
	u_int32_t	l;

	if (udp && !cksum)
		return (0x0000);
	l = cksum + old - new;
	l = (l >> 16) + (l & 65535);
	l = l & 65535;
	if (udp && !l)
		return (0xFFFF);
	return (l);
}
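/*
 * pf_cksum_fixup() above updates a ones'-complement Internet
 * checksum incrementally when a 16-bit word changes from 'old' to
 * 'new', folding the carry back into the low 16 bits instead of
 * recomputing the sum over the whole packet.  The udp flag keeps
 * the special meaning of a zero UDP checksum ("none") intact: an
 * incoming zero is preserved, and a result of zero becomes 0xFFFF.
 */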
static void
pf_change_ap(struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc,
    struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af)
{
	struct pf_addr	ao;
	u_int16_t	po = *p;

	PF_ACPY(&ao, a, af);
	PF_ACPY(a, an, af);

	*p = pn;

	switch (af) {
#ifdef INET
	case AF_INET:
		*ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
		    ao.addr16[0], an->addr16[0], 0),
		    ao.addr16[1], an->addr16[1], 0);
		*p = pn;
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    po, pn, u);
		break;
#endif /* INET */
#ifdef INET6
	case AF_INET6:
		*pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
		    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc,
		    ao.addr16[0], an->addr16[0], u),
		    ao.addr16[1], an->addr16[1], u),
		    ao.addr16[2], an->addr16[2], u),
		    ao.addr16[3], an->addr16[3], u),
		    ao.addr16[4], an->addr16[4], u),
		    ao.addr16[5], an->addr16[5], u),
		    ao.addr16[6], an->addr16[6], u),
		    ao.addr16[7], an->addr16[7], u),
		    po, pn, u);
		break;
#endif /* INET6 */
	}
}


/* Changes a u_int32_t. Uses a void * so there are no align restrictions */
void
pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
{
	u_int32_t	ao;

	memcpy(&ao, a, sizeof(ao));
	memcpy(a, &an, sizeof(u_int32_t));
	*c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
	    ao % 65536, an % 65536, u);
}

#ifdef INET6
static void
pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
{
	struct pf_addr	ao;

	PF_ACPY(&ao, a, AF_INET6);
	PF_ACPY(a, an, AF_INET6);

	*c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
	    pf_cksum_fixup(pf_cksum_fixup(*c,
	    ao.addr16[0], an->addr16[0], u),
	    ao.addr16[1], an->addr16[1], u),
	    ao.addr16[2], an->addr16[2], u),
	    ao.addr16[3], an->addr16[3], u),
	    ao.addr16[4], an->addr16[4], u),
	    ao.addr16[5], an->addr16[5], u),
	    ao.addr16[6], an->addr16[6], u),
	    ao.addr16[7], an->addr16[7], u);
}
#endif /* INET6 */
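/*
 * pf_change_icmp() rewrites an address (and optionally a port)
 * inside the packet quoted by an ICMP error message.  Since the
 * quoted header is itself covered by the ICMP checksum, every inner
 * fixup must be folded into the inner protocol checksum, the inner
 * IP checksum, the ICMP checksum and, when the outer header changes
 * as well, the outer IP or ICMPv6 checksum.
 */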
*/ 2034 if (oa) { 2035 PF_ACPY(oa, na, af); 2036 switch (af) { 2037#ifdef INET 2038 case AF_INET: 2039 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc, 2040 ooa.addr16[0], oa->addr16[0], 0), 2041 ooa.addr16[1], oa->addr16[1], 0); 2042 break; 2043#endif /* INET */ 2044#ifdef INET6 2045 case AF_INET6: 2046 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2047 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( 2048 pf_cksum_fixup(pf_cksum_fixup(*ic, 2049 ooa.addr16[0], oa->addr16[0], u), 2050 ooa.addr16[1], oa->addr16[1], u), 2051 ooa.addr16[2], oa->addr16[2], u), 2052 ooa.addr16[3], oa->addr16[3], u), 2053 ooa.addr16[4], oa->addr16[4], u), 2054 ooa.addr16[5], oa->addr16[5], u), 2055 ooa.addr16[6], oa->addr16[6], u), 2056 ooa.addr16[7], oa->addr16[7], u); 2057 break; 2058#endif /* INET6 */ 2059 } 2060 } 2061} 2062 2063 2064/* 2065 * Need to modulate the sequence numbers in the TCP SACK option 2066 * (credits to Krzysztof Pfaff for report and patch) 2067 */ 2068static int 2069pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd, 2070 struct tcphdr *th, struct pf_state_peer *dst) 2071{ 2072 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen; 2073 u_int8_t opts[TCP_MAXOLEN], *opt = opts; 2074 int copyback = 0, i, olen; 2075 struct sackblk sack; 2076 2077#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2) 2078 if (hlen < TCPOLEN_SACKLEN || 2079 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af)) 2080 return 0; 2081 2082 while (hlen >= TCPOLEN_SACKLEN) { 2083 olen = opt[1]; 2084 switch (*opt) { 2085 case TCPOPT_EOL: /* FALLTHROUGH */ 2086 case TCPOPT_NOP: 2087 opt++; 2088 hlen--; 2089 break; 2090 case TCPOPT_SACK: 2091 if (olen > hlen) 2092 olen = hlen; 2093 if (olen >= TCPOLEN_SACKLEN) { 2094 for (i = 2; i + TCPOLEN_SACK <= olen; 2095 i += TCPOLEN_SACK) { 2096 memcpy(&sack, &opt[i], sizeof(sack)); 2097 pf_change_a(&sack.start, &th->th_sum, 2098 htonl(ntohl(sack.start) - 2099 dst->seqdiff), 0); 2100 pf_change_a(&sack.end, &th->th_sum, 2101 htonl(ntohl(sack.end) - 2102 dst->seqdiff), 0); 2103 memcpy(&opt[i], &sack, sizeof(sack)); 2104 } 2105 copyback = 1; 2106 } 2107 /* FALLTHROUGH */ 2108 default: 2109 if (olen < 2) 2110 olen = 2; 2111 hlen -= olen; 2112 opt += olen; 2113 } 2114 } 2115 2116 if (copyback) 2117 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts); 2118 return (copyback); 2119} 2120 2121static void 2122pf_send_tcp(struct mbuf *replyto, const struct pf_rule *r, sa_family_t af, 2123 const struct pf_addr *saddr, const struct pf_addr *daddr, 2124 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack, 2125 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag, 2126 u_int16_t rtag, struct ifnet *ifp) 2127{ 2128 struct pf_send_entry *pfse; 2129 struct mbuf *m; 2130 int len, tlen; 2131#ifdef INET 2132 struct ip *h = NULL; 2133#endif /* INET */ 2134#ifdef INET6 2135 struct ip6_hdr *h6 = NULL; 2136#endif /* INET6 */ 2137 struct tcphdr *th; 2138 char *opt; 2139 struct pf_mtag *pf_mtag; 2140 2141 len = 0; 2142 th = NULL; 2143 2144 /* maximum segment size tcp option */ 2145 tlen = sizeof(struct tcphdr); 2146 if (mss) 2147 tlen += 4; 2148 2149 switch (af) { 2150#ifdef INET 2151 case AF_INET: 2152 len = sizeof(struct ip) + tlen; 2153 break; 2154#endif /* INET */ 2155#ifdef INET6 2156 case AF_INET6: 2157 len = sizeof(struct ip6_hdr) + tlen; 2158 break; 2159#endif /* INET6 */ 2160 default: 2161 panic("%s: unsupported af %d", __func__, af); 2162 } 2163 2164 /* Allocate outgoing queue entry, mbuf and mbuf tag. 
*/ 2165 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); 2166 if (pfse == NULL) 2167 return; 2168 m = m_gethdr(M_NOWAIT, MT_HEADER); 2169 if (m == NULL) { 2170 free(pfse, M_PFTEMP); 2171 return; 2172 } 2173#ifdef MAC 2174 mac_netinet_firewall_send(m); 2175#endif 2176 if ((pf_mtag = pf_get_mtag(m)) == NULL) { 2177 free(pfse, M_PFTEMP); 2178 m_freem(m); 2179 return; 2180 } 2181 if (tag) 2182 m->m_flags |= M_SKIP_FIREWALL; 2183 pf_mtag->tag = rtag; 2184 2185 if (r != NULL && r->rtableid >= 0) 2186 M_SETFIB(m, r->rtableid); 2187 2188#ifdef ALTQ 2189 if (r != NULL && r->qid) { 2190 pf_mtag->qid = r->qid; 2191 2192 /* add hints for ecn */ 2193 pf_mtag->hdr = mtod(m, struct ip *); 2194 } 2195#endif /* ALTQ */ 2196 m->m_data += max_linkhdr; 2197 m->m_pkthdr.len = m->m_len = len; 2198 m->m_pkthdr.rcvif = NULL; 2199 bzero(m->m_data, len); 2200 switch (af) { 2201#ifdef INET 2202 case AF_INET: 2203 h = mtod(m, struct ip *); 2204 2205 /* IP header fields included in the TCP checksum */ 2206 h->ip_p = IPPROTO_TCP; 2207 h->ip_len = htons(tlen); 2208 h->ip_src.s_addr = saddr->v4.s_addr; 2209 h->ip_dst.s_addr = daddr->v4.s_addr; 2210 2211 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip)); 2212 break; 2213#endif /* INET */ 2214#ifdef INET6 2215 case AF_INET6: 2216 h6 = mtod(m, struct ip6_hdr *); 2217 2218 /* IP header fields included in the TCP checksum */ 2219 h6->ip6_nxt = IPPROTO_TCP; 2220 h6->ip6_plen = htons(tlen); 2221 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr)); 2222 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr)); 2223 2224 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr)); 2225 break; 2226#endif /* INET6 */ 2227 } 2228 2229 /* TCP header */ 2230 th->th_sport = sport; 2231 th->th_dport = dport; 2232 th->th_seq = htonl(seq); 2233 th->th_ack = htonl(ack); 2234 th->th_off = tlen >> 2; 2235 th->th_flags = flags; 2236 th->th_win = htons(win); 2237 2238 if (mss) { 2239 opt = (char *)(th + 1); 2240 opt[0] = TCPOPT_MAXSEG; 2241 opt[1] = 4; 2242 HTONS(mss); 2243 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2); 2244 } 2245 2246 switch (af) { 2247#ifdef INET 2248 case AF_INET: 2249 /* TCP checksum */ 2250 th->th_sum = in_cksum(m, len); 2251 2252 /* Finish the IP header */ 2253 h->ip_v = 4; 2254 h->ip_hl = sizeof(*h) >> 2; 2255 h->ip_tos = IPTOS_LOWDELAY; 2256 h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0); 2257 h->ip_len = htons(len); 2258 h->ip_ttl = ttl ? ttl : V_ip_defttl; 2259 h->ip_sum = 0; 2260 2261 pfse->pfse_type = PFSE_IP; 2262 break; 2263#endif /* INET */ 2264#ifdef INET6 2265 case AF_INET6: 2266 /* TCP checksum */ 2267 th->th_sum = in6_cksum(m, IPPROTO_TCP, 2268 sizeof(struct ip6_hdr), tlen); 2269 2270 h6->ip6_vfc |= IPV6_VERSION; 2271 h6->ip6_hlim = IPV6_DEFHLIM; 2272 2273 pfse->pfse_type = PFSE_IP6; 2274 break; 2275#endif /* INET6 */ 2276 } 2277 pfse->pfse_m = m; 2278 pf_send(pfse); 2279} 2280 2281static void 2282pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af, 2283 struct pf_rule *r) 2284{ 2285 struct pf_send_entry *pfse; 2286 struct mbuf *m0; 2287 struct pf_mtag *pf_mtag; 2288 2289 /* Allocate outgoing queue entry, mbuf and mbuf tag. 
*/ 2290	pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT); 2291	if (pfse == NULL) 2292		return; 2293 2294	if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) { 2295		free(pfse, M_PFTEMP); 2296		return; 2297	} 2298 2299	if ((pf_mtag = pf_get_mtag(m0)) == NULL) { 2300		free(pfse, M_PFTEMP); m_freem(m0); 2301		return; 2302	} 2303	/* XXX: revisit */ 2304	m0->m_flags |= M_SKIP_FIREWALL; 2305 2306	if (r->rtableid >= 0) 2307		M_SETFIB(m0, r->rtableid); 2308 2309#ifdef ALTQ 2310	if (r->qid) { 2311		pf_mtag->qid = r->qid; 2312		/* add hints for ecn */ 2313		pf_mtag->hdr = mtod(m0, struct ip *); 2314	} 2315#endif /* ALTQ */ 2316 2317	switch (af) { 2318#ifdef INET 2319	case AF_INET: 2320		pfse->pfse_type = PFSE_ICMP; 2321		break; 2322#endif /* INET */ 2323#ifdef INET6 2324	case AF_INET6: 2325		pfse->pfse_type = PFSE_ICMP6; 2326		break; 2327#endif /* INET6 */ 2328	} 2329	pfse->pfse_m = m0; 2330	pfse->pfse_icmp_type = type; 2331	pfse->pfse_icmp_code = code; 2332	pf_send(pfse); 2333} 2334 2335/* 2336 * Return 1 if the addresses a and b match (with mask m), otherwise return 0. 2337 * If n is 0, they match if they are equal. If n is != 0, they match if they 2338 * are different. 2339 */ 2340int 2341pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, 2342    struct pf_addr *b, sa_family_t af) 2343{ 2344	int	match = 0; 2345 2346	switch (af) { 2347#ifdef INET 2348	case AF_INET: 2349		if ((a->addr32[0] & m->addr32[0]) == 2350		    (b->addr32[0] & m->addr32[0])) 2351			match++; 2352		break; 2353#endif /* INET */ 2354#ifdef INET6 2355	case AF_INET6: 2356		if (((a->addr32[0] & m->addr32[0]) == 2357		     (b->addr32[0] & m->addr32[0])) && 2358		    ((a->addr32[1] & m->addr32[1]) == 2359		     (b->addr32[1] & m->addr32[1])) && 2360		    ((a->addr32[2] & m->addr32[2]) == 2361		     (b->addr32[2] & m->addr32[2])) && 2362		    ((a->addr32[3] & m->addr32[3]) == 2363		     (b->addr32[3] & m->addr32[3]))) 2364			match++; 2365		break; 2366#endif /* INET6 */ 2367	} 2368	if (match) { 2369		if (n) 2370			return (0); 2371		else 2372			return (1); 2373	} else { 2374		if (n) 2375			return (1); 2376		else 2377			return (0); 2378	} 2379} 2380 2381/* 2382 * Return 1 if b <= a <= e, otherwise return 0.
2383 */ 2384int 2385pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, 2386 struct pf_addr *a, sa_family_t af) 2387{ 2388 switch (af) { 2389#ifdef INET 2390 case AF_INET: 2391 if ((a->addr32[0] < b->addr32[0]) || 2392 (a->addr32[0] > e->addr32[0])) 2393 return (0); 2394 break; 2395#endif /* INET */ 2396#ifdef INET6 2397 case AF_INET6: { 2398 int i; 2399 2400 /* check a >= b */ 2401 for (i = 0; i < 4; ++i) 2402 if (a->addr32[i] > b->addr32[i]) 2403 break; 2404 else if (a->addr32[i] < b->addr32[i]) 2405 return (0); 2406 /* check a <= e */ 2407 for (i = 0; i < 4; ++i) 2408 if (a->addr32[i] < e->addr32[i]) 2409 break; 2410 else if (a->addr32[i] > e->addr32[i]) 2411 return (0); 2412 break; 2413 } 2414#endif /* INET6 */ 2415 } 2416 return (1); 2417} 2418 2419static int 2420pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) 2421{ 2422 switch (op) { 2423 case PF_OP_IRG: 2424 return ((p > a1) && (p < a2)); 2425 case PF_OP_XRG: 2426 return ((p < a1) || (p > a2)); 2427 case PF_OP_RRG: 2428 return ((p >= a1) && (p <= a2)); 2429 case PF_OP_EQ: 2430 return (p == a1); 2431 case PF_OP_NE: 2432 return (p != a1); 2433 case PF_OP_LT: 2434 return (p < a1); 2435 case PF_OP_LE: 2436 return (p <= a1); 2437 case PF_OP_GT: 2438 return (p > a1); 2439 case PF_OP_GE: 2440 return (p >= a1); 2441 } 2442 return (0); /* never reached */ 2443} 2444 2445int 2446pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) 2447{ 2448 NTOHS(a1); 2449 NTOHS(a2); 2450 NTOHS(p); 2451 return (pf_match(op, a1, a2, p)); 2452} 2453 2454static int 2455pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) 2456{ 2457 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2458 return (0); 2459 return (pf_match(op, a1, a2, u)); 2460} 2461 2462static int 2463pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) 2464{ 2465 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) 2466 return (0); 2467 return (pf_match(op, a1, a2, g)); 2468} 2469 2470int 2471pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag, int mtag) 2472{ 2473 if (*tag == -1) 2474 *tag = mtag; 2475 2476 return ((!r->match_tag_not && r->match_tag == *tag) || 2477 (r->match_tag_not && r->match_tag != *tag)); 2478} 2479 2480int 2481pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag) 2482{ 2483 2484 KASSERT(tag > 0, ("%s: tag %d", __func__, tag)); 2485 2486 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL)) 2487 return (ENOMEM); 2488 2489 pd->pf_mtag->tag = tag; 2490 2491 return (0); 2492} 2493 2494#define PF_ANCHOR_STACKSIZE 32 2495struct pf_anchor_stackframe { 2496 struct pf_ruleset *rs; 2497 struct pf_rule *r; /* XXX: + match bit */ 2498 struct pf_anchor *child; 2499}; 2500 2501/* 2502 * XXX: We rely on malloc(9) returning pointer aligned addresses. 
2503 */ 2504#define PF_ANCHORSTACK_MATCH 0x00000001 2505#define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH) 2506 2507#define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH) 2508#define PF_ANCHOR_RULE(f) (struct pf_rule *) \ 2509 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK) 2510#define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \ 2511 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \ 2512} while (0) 2513 2514void 2515pf_step_into_anchor(struct pf_anchor_stackframe *stack, int *depth, 2516 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a, 2517 int *match) 2518{ 2519 struct pf_anchor_stackframe *f; 2520 2521 PF_RULES_RASSERT(); 2522 2523 if (match) 2524 *match = 0; 2525 if (*depth >= PF_ANCHOR_STACKSIZE) { 2526 printf("%s: anchor stack overflow on %s\n", 2527 __func__, (*r)->anchor->name); 2528 *r = TAILQ_NEXT(*r, entries); 2529 return; 2530 } else if (*depth == 0 && a != NULL) 2531 *a = *r; 2532 f = stack + (*depth)++; 2533 f->rs = *rs; 2534 f->r = *r; 2535 if ((*r)->anchor_wildcard) { 2536 struct pf_anchor_node *parent = &(*r)->anchor->children; 2537 2538 if ((f->child = RB_MIN(pf_anchor_node, parent)) == NULL) { 2539 *r = NULL; 2540 return; 2541 } 2542 *rs = &f->child->ruleset; 2543 } else { 2544 f->child = NULL; 2545 *rs = &(*r)->anchor->ruleset; 2546 } 2547 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2548} 2549 2550int 2551pf_step_out_of_anchor(struct pf_anchor_stackframe *stack, int *depth, 2552 struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a, 2553 int *match) 2554{ 2555 struct pf_anchor_stackframe *f; 2556 struct pf_rule *fr; 2557 int quick = 0; 2558 2559 PF_RULES_RASSERT(); 2560 2561 do { 2562 if (*depth <= 0) 2563 break; 2564 f = stack + *depth - 1; 2565 fr = PF_ANCHOR_RULE(f); 2566 if (f->child != NULL) { 2567 struct pf_anchor_node *parent; 2568 2569 /* 2570 * This block traverses through 2571 * a wildcard anchor. 2572 */ 2573 parent = &fr->anchor->children; 2574 if (match != NULL && *match) { 2575 /* 2576 * If any of "*" matched, then 2577 * "foo/ *" matched, mark frame 2578 * appropriately. 
2579 */ 2580 PF_ANCHOR_SET_MATCH(f); 2581 *match = 0; 2582 } 2583 f->child = RB_NEXT(pf_anchor_node, parent, f->child); 2584 if (f->child != NULL) { 2585 *rs = &f->child->ruleset; 2586 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); 2587 if (*r == NULL) 2588 continue; 2589 else 2590 break; 2591 } 2592 } 2593 (*depth)--; 2594 if (*depth == 0 && a != NULL) 2595 *a = NULL; 2596 *rs = f->rs; 2597 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match)) 2598 quick = fr->quick; 2599 *r = TAILQ_NEXT(fr, entries); 2600 } while (*r == NULL); 2601 2602 return (quick); 2603} 2604 2605#ifdef INET6 2606void 2607pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr, 2608 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af) 2609{ 2610 switch (af) { 2611#ifdef INET 2612 case AF_INET: 2613 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2614 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2615 break; 2616#endif /* INET */ 2617 case AF_INET6: 2618 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) | 2619 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]); 2620 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) | 2621 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]); 2622 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) | 2623 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]); 2624 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) | 2625 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]); 2626 break; 2627 } 2628} 2629 2630void 2631pf_addr_inc(struct pf_addr *addr, sa_family_t af) 2632{ 2633 switch (af) { 2634#ifdef INET 2635 case AF_INET: 2636 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); 2637 break; 2638#endif /* INET */ 2639 case AF_INET6: 2640 if (addr->addr32[3] == 0xffffffff) { 2641 addr->addr32[3] = 0; 2642 if (addr->addr32[2] == 0xffffffff) { 2643 addr->addr32[2] = 0; 2644 if (addr->addr32[1] == 0xffffffff) { 2645 addr->addr32[1] = 0; 2646 addr->addr32[0] = 2647 htonl(ntohl(addr->addr32[0]) + 1); 2648 } else 2649 addr->addr32[1] = 2650 htonl(ntohl(addr->addr32[1]) + 1); 2651 } else 2652 addr->addr32[2] = 2653 htonl(ntohl(addr->addr32[2]) + 1); 2654 } else 2655 addr->addr32[3] = 2656 htonl(ntohl(addr->addr32[3]) + 1); 2657 break; 2658 } 2659} 2660#endif /* INET6 */ 2661 2662int 2663pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m) 2664{ 2665 struct pf_addr *saddr, *daddr; 2666 u_int16_t sport, dport; 2667 struct inpcbinfo *pi; 2668 struct inpcb *inp; 2669 2670 pd->lookup.uid = UID_MAX; 2671 pd->lookup.gid = GID_MAX; 2672 2673 switch (pd->proto) { 2674 case IPPROTO_TCP: 2675 if (pd->hdr.tcp == NULL) 2676 return (-1); 2677 sport = pd->hdr.tcp->th_sport; 2678 dport = pd->hdr.tcp->th_dport; 2679 pi = &V_tcbinfo; 2680 break; 2681 case IPPROTO_UDP: 2682 if (pd->hdr.udp == NULL) 2683 return (-1); 2684 sport = pd->hdr.udp->uh_sport; 2685 dport = pd->hdr.udp->uh_dport; 2686 pi = &V_udbinfo; 2687 break; 2688 default: 2689 return (-1); 2690 } 2691 if (direction == PF_IN) { 2692 saddr = pd->src; 2693 daddr = pd->dst; 2694 } else { 2695 u_int16_t p; 2696 2697 p = sport; 2698 sport = dport; 2699 dport = p; 2700 saddr = pd->dst; 2701 daddr = pd->src; 2702 } 2703 switch (pd->af) { 2704#ifdef INET 2705 case AF_INET: 2706 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4, 2707 dport, INPLOOKUP_RLOCKPCB, NULL, m); 2708 if (inp == NULL) { 2709 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, 2710 daddr->v4, dport, INPLOOKUP_WILDCARD | 2711 INPLOOKUP_RLOCKPCB, NULL, m); 2712 if (inp == NULL) 2713 return (-1); 2714 } 2715 break; 
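		/*
		 * The second in_pcblookup_mbuf() pass exists because the
		 * first demands an exact four-tuple match and so misses
		 * listening sockets bound to a wildcard address; adding
		 * INPLOOKUP_WILDCARD catches those.  The pattern in
		 * isolation (a sketch with the same KPI as above; faddr,
		 * fport, laddr and lport stand for the already-swapped
		 * saddr/sport and daddr/dport, not for new variables):
		 *
		 *	inp = in_pcblookup_mbuf(pi, faddr, fport, laddr,
		 *	    lport, INPLOOKUP_RLOCKPCB, NULL, m);
		 *	if (inp == NULL)
		 *		inp = in_pcblookup_mbuf(pi, faddr, fport,
		 *		    laddr, lport, INPLOOKUP_WILDCARD |
		 *		    INPLOOKUP_RLOCKPCB, NULL, m);
		 */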
2716#endif /* INET */ 2717#ifdef INET6 2718 case AF_INET6: 2719 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6, 2720 dport, INPLOOKUP_RLOCKPCB, NULL, m); 2721 if (inp == NULL) { 2722 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, 2723 &daddr->v6, dport, INPLOOKUP_WILDCARD | 2724 INPLOOKUP_RLOCKPCB, NULL, m); 2725 if (inp == NULL) 2726 return (-1); 2727 } 2728 break; 2729#endif /* INET6 */ 2730 2731 default: 2732 return (-1); 2733 } 2734 INP_RLOCK_ASSERT(inp); 2735 pd->lookup.uid = inp->inp_cred->cr_uid; 2736 pd->lookup.gid = inp->inp_cred->cr_groups[0]; 2737 INP_RUNLOCK(inp); 2738 2739 return (1); 2740} 2741 2742static u_int8_t 2743pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 2744{ 2745 int hlen; 2746 u_int8_t hdr[60]; 2747 u_int8_t *opt, optlen; 2748 u_int8_t wscale = 0; 2749 2750 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 2751 if (hlen <= sizeof(struct tcphdr)) 2752 return (0); 2753 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 2754 return (0); 2755 opt = hdr + sizeof(struct tcphdr); 2756 hlen -= sizeof(struct tcphdr); 2757 while (hlen >= 3) { 2758 switch (*opt) { 2759 case TCPOPT_EOL: 2760 case TCPOPT_NOP: 2761 ++opt; 2762 --hlen; 2763 break; 2764 case TCPOPT_WINDOW: 2765 wscale = opt[2]; 2766 if (wscale > TCP_MAX_WINSHIFT) 2767 wscale = TCP_MAX_WINSHIFT; 2768 wscale |= PF_WSCALE_FLAG; 2769 /* FALLTHROUGH */ 2770 default: 2771 optlen = opt[1]; 2772 if (optlen < 2) 2773 optlen = 2; 2774 hlen -= optlen; 2775 opt += optlen; 2776 break; 2777 } 2778 } 2779 return (wscale); 2780} 2781 2782static u_int16_t 2783pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af) 2784{ 2785 int hlen; 2786 u_int8_t hdr[60]; 2787 u_int8_t *opt, optlen; 2788 u_int16_t mss = V_tcp_mssdflt; 2789 2790 hlen = th_off << 2; /* hlen <= sizeof(hdr) */ 2791 if (hlen <= sizeof(struct tcphdr)) 2792 return (0); 2793 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af)) 2794 return (0); 2795 opt = hdr + sizeof(struct tcphdr); 2796 hlen -= sizeof(struct tcphdr); 2797 while (hlen >= TCPOLEN_MAXSEG) { 2798 switch (*opt) { 2799 case TCPOPT_EOL: 2800 case TCPOPT_NOP: 2801 ++opt; 2802 --hlen; 2803 break; 2804 case TCPOPT_MAXSEG: 2805 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2); 2806 NTOHS(mss); 2807 /* FALLTHROUGH */ 2808 default: 2809 optlen = opt[1]; 2810 if (optlen < 2) 2811 optlen = 2; 2812 hlen -= optlen; 2813 opt += optlen; 2814 break; 2815 } 2816 } 2817 return (mss); 2818} 2819 2820static u_int16_t 2821pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer) 2822{ 2823#ifdef INET 2824 struct sockaddr_in *dst; 2825 struct route ro; 2826#endif /* INET */ 2827#ifdef INET6 2828 struct sockaddr_in6 *dst6; 2829 struct route_in6 ro6; 2830#endif /* INET6 */ 2831 struct rtentry *rt = NULL; 2832 int hlen = 0; 2833 u_int16_t mss = V_tcp_mssdflt; 2834 2835 switch (af) { 2836#ifdef INET 2837 case AF_INET: 2838 hlen = sizeof(struct ip); 2839 bzero(&ro, sizeof(ro)); 2840 dst = (struct sockaddr_in *)&ro.ro_dst; 2841 dst->sin_family = AF_INET; 2842 dst->sin_len = sizeof(*dst); 2843 dst->sin_addr = addr->v4; 2844 in_rtalloc_ign(&ro, 0, rtableid); 2845 rt = ro.ro_rt; 2846 break; 2847#endif /* INET */ 2848#ifdef INET6 2849 case AF_INET6: 2850 hlen = sizeof(struct ip6_hdr); 2851 bzero(&ro6, sizeof(ro6)); 2852 dst6 = (struct sockaddr_in6 *)&ro6.ro_dst; 2853 dst6->sin6_family = AF_INET6; 2854 dst6->sin6_len = sizeof(*dst6); 2855 dst6->sin6_addr = addr->v6; 2856 in6_rtalloc_ign(&ro6, 0, rtableid); 2857 rt = ro6.ro_rt; 2858 break; 2859#endif /* INET6 */ 2860 } 
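	/*
	 * What follows derives the MSS from the route's interface MTU:
	 * mss = if_mtu - network header - fixed TCP header, then raised
	 * to at least V_tcp_mssdflt, capped by the peer's offer and
	 * floored at 64.  Worked numbers (illustrative) for a 1500-byte
	 * Ethernet MTU: IPv4 gives 1500 - 20 - 20 = 1460, IPv6 gives
	 * 1500 - 40 - 20 = 1440.
	 */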
2861 2862 if (rt && rt->rt_ifp) { 2863 mss = rt->rt_ifp->if_mtu - hlen - sizeof(struct tcphdr); 2864 mss = max(V_tcp_mssdflt, mss); 2865 RTFREE(rt); 2866 } 2867 mss = min(mss, offer); 2868 mss = max(mss, 64); /* sanity - at least max opt space */ 2869 return (mss); 2870} 2871 2872static void 2873pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr) 2874{ 2875 struct pf_rule *r = s->rule.ptr; 2876 struct pf_src_node *sn = NULL; 2877 2878 s->rt_kif = NULL; 2879 if (!r->rt || r->rt == PF_FASTROUTE) 2880 return; 2881 switch (s->key[PF_SK_WIRE]->af) { 2882#ifdef INET 2883 case AF_INET: 2884 pf_map_addr(AF_INET, r, saddr, &s->rt_addr, NULL, &sn); 2885 s->rt_kif = r->rpool.cur->kif; 2886 break; 2887#endif /* INET */ 2888#ifdef INET6 2889 case AF_INET6: 2890 pf_map_addr(AF_INET6, r, saddr, &s->rt_addr, NULL, &sn); 2891 s->rt_kif = r->rpool.cur->kif; 2892 break; 2893#endif /* INET6 */ 2894 } 2895} 2896 2897static u_int32_t 2898pf_tcp_iss(struct pf_pdesc *pd) 2899{ 2900 MD5_CTX ctx; 2901 u_int32_t digest[4]; 2902 2903 if (V_pf_tcp_secret_init == 0) { 2904 read_random(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret)); 2905 MD5Init(&V_pf_tcp_secret_ctx); 2906 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret, 2907 sizeof(V_pf_tcp_secret)); 2908 V_pf_tcp_secret_init = 1; 2909 } 2910 2911 ctx = V_pf_tcp_secret_ctx; 2912 2913 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); 2914 MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); 2915 if (pd->af == AF_INET6) { 2916 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr)); 2917 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr)); 2918 } else { 2919 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr)); 2920 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr)); 2921 } 2922 MD5Final((u_char *)digest, &ctx); 2923 V_pf_tcp_iss_off += 4096; 2924#define ISN_RANDOM_INCREMENT (4096 - 1) 2925 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) + 2926 V_pf_tcp_iss_off); 2927#undef ISN_RANDOM_INCREMENT 2928} 2929 2930static int 2931pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, 2932 struct pfi_kif *kif, struct mbuf *m, int off, struct pf_pdesc *pd, 2933 struct pf_rule **am, struct pf_ruleset **rsm, struct inpcb *inp) 2934{ 2935 struct pf_rule *nr = NULL; 2936 struct pf_addr * const saddr = pd->src; 2937 struct pf_addr * const daddr = pd->dst; 2938 sa_family_t af = pd->af; 2939 struct pf_rule *r, *a = NULL; 2940 struct pf_ruleset *ruleset = NULL; 2941 struct pf_src_node *nsn = NULL; 2942 struct tcphdr *th = pd->hdr.tcp; 2943 struct pf_state_key *sk = NULL, *nk = NULL; 2944 u_short reason; 2945 int rewrite = 0, hdrlen = 0; 2946 int tag = -1, rtableid = -1; 2947 int asd = 0; 2948 int match = 0; 2949 int state_icmp = 0; 2950 u_int16_t sport = 0, dport = 0; 2951 u_int16_t bproto_sum = 0, bip_sum = 0; 2952 u_int8_t icmptype = 0, icmpcode = 0; 2953 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; 2954 2955 PF_RULES_RASSERT(); 2956 2957 if (inp != NULL) { 2958 INP_LOCK_ASSERT(inp); 2959 pd->lookup.uid = inp->inp_cred->cr_uid; 2960 pd->lookup.gid = inp->inp_cred->cr_groups[0]; 2961 pd->lookup.done = 1; 2962 } 2963 2964 switch (pd->proto) { 2965 case IPPROTO_TCP: 2966 sport = th->th_sport; 2967 dport = th->th_dport; 2968 hdrlen = sizeof(*th); 2969 break; 2970 case IPPROTO_UDP: 2971 sport = pd->hdr.udp->uh_sport; 2972 dport = pd->hdr.udp->uh_dport; 2973 hdrlen = sizeof(*pd->hdr.udp); 2974 break; 2975#ifdef INET 2976 case IPPROTO_ICMP: 2977 if (pd->af != AF_INET) 2978 break; 
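		/*
		 * ICMP carries no port numbers; the echo/query id below is
		 * used as both source and destination port, which lets the
		 * generic port-based skip steps and state keys handle ICMP
		 * without special cases.
		 */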
2979 sport = dport = pd->hdr.icmp->icmp_id; 2980 hdrlen = sizeof(*pd->hdr.icmp); 2981 icmptype = pd->hdr.icmp->icmp_type; 2982 icmpcode = pd->hdr.icmp->icmp_code; 2983 2984 if (icmptype == ICMP_UNREACH || 2985 icmptype == ICMP_SOURCEQUENCH || 2986 icmptype == ICMP_REDIRECT || 2987 icmptype == ICMP_TIMXCEED || 2988 icmptype == ICMP_PARAMPROB) 2989 state_icmp++; 2990 break; 2991#endif /* INET */ 2992#ifdef INET6 2993 case IPPROTO_ICMPV6: 2994 if (af != AF_INET6) 2995 break; 2996 sport = dport = pd->hdr.icmp6->icmp6_id; 2997 hdrlen = sizeof(*pd->hdr.icmp6); 2998 icmptype = pd->hdr.icmp6->icmp6_type; 2999 icmpcode = pd->hdr.icmp6->icmp6_code; 3000 3001 if (icmptype == ICMP6_DST_UNREACH || 3002 icmptype == ICMP6_PACKET_TOO_BIG || 3003 icmptype == ICMP6_TIME_EXCEEDED || 3004 icmptype == ICMP6_PARAM_PROB) 3005 state_icmp++; 3006 break; 3007#endif /* INET6 */ 3008 default: 3009 sport = dport = hdrlen = 0; 3010 break; 3011 } 3012 3013 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3014 3015 /* check packet for BINAT/NAT/RDR */ 3016 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk, 3017 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) { 3018 KASSERT(sk != NULL, ("%s: null sk", __func__)); 3019 KASSERT(nk != NULL, ("%s: null nk", __func__)); 3020 3021 if (pd->ip_sum) 3022 bip_sum = *pd->ip_sum; 3023 3024 switch (pd->proto) { 3025 case IPPROTO_TCP: 3026 bproto_sum = th->th_sum; 3027 pd->proto_sum = &th->th_sum; 3028 3029 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3030 nk->port[pd->sidx] != sport) { 3031 pf_change_ap(saddr, &th->th_sport, pd->ip_sum, 3032 &th->th_sum, &nk->addr[pd->sidx], 3033 nk->port[pd->sidx], 0, af); 3034 pd->sport = &th->th_sport; 3035 sport = th->th_sport; 3036 } 3037 3038 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3039 nk->port[pd->didx] != dport) { 3040 pf_change_ap(daddr, &th->th_dport, pd->ip_sum, 3041 &th->th_sum, &nk->addr[pd->didx], 3042 nk->port[pd->didx], 0, af); 3043 dport = th->th_dport; 3044 pd->dport = &th->th_dport; 3045 } 3046 rewrite++; 3047 break; 3048 case IPPROTO_UDP: 3049 bproto_sum = pd->hdr.udp->uh_sum; 3050 pd->proto_sum = &pd->hdr.udp->uh_sum; 3051 3052 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) || 3053 nk->port[pd->sidx] != sport) { 3054 pf_change_ap(saddr, &pd->hdr.udp->uh_sport, 3055 pd->ip_sum, &pd->hdr.udp->uh_sum, 3056 &nk->addr[pd->sidx], 3057 nk->port[pd->sidx], 1, af); 3058 sport = pd->hdr.udp->uh_sport; 3059 pd->sport = &pd->hdr.udp->uh_sport; 3060 } 3061 3062 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) || 3063 nk->port[pd->didx] != dport) { 3064 pf_change_ap(daddr, &pd->hdr.udp->uh_dport, 3065 pd->ip_sum, &pd->hdr.udp->uh_sum, 3066 &nk->addr[pd->didx], 3067 nk->port[pd->didx], 1, af); 3068 dport = pd->hdr.udp->uh_dport; 3069 pd->dport = &pd->hdr.udp->uh_dport; 3070 } 3071 rewrite++; 3072 break; 3073#ifdef INET 3074 case IPPROTO_ICMP: 3075 nk->port[0] = nk->port[1]; 3076 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET)) 3077 pf_change_a(&saddr->v4.s_addr, pd->ip_sum, 3078 nk->addr[pd->sidx].v4.s_addr, 0); 3079 3080 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET)) 3081 pf_change_a(&daddr->v4.s_addr, pd->ip_sum, 3082 nk->addr[pd->didx].v4.s_addr, 0); 3083 3084 if (nk->port[1] != pd->hdr.icmp->icmp_id) { 3085 pd->hdr.icmp->icmp_cksum = pf_cksum_fixup( 3086 pd->hdr.icmp->icmp_cksum, sport, 3087 nk->port[1], 0); 3088 pd->hdr.icmp->icmp_id = nk->port[1]; 3089 pd->sport = &pd->hdr.icmp->icmp_id; 3090 } 3091 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 3092 break; 3093#endif /* INET */ 
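		/*
		 * The ICMPv6 case below mirrors the IPv4 one above; the
		 * trailing 0 passed to the checksum fixups marks the sum
		 * as non-UDP, since only UDP treats a checksum of zero as
		 * special (meaning no checksum at all).
		 */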
3094#ifdef INET6 3095		case IPPROTO_ICMPV6: 3096			nk->port[0] = nk->port[1]; 3097			if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6)) 3098				pf_change_a6(saddr, &pd->hdr.icmp6->icmp6_cksum, 3099				    &nk->addr[pd->sidx], 0); 3100 3101			if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6)) 3102				pf_change_a6(daddr, &pd->hdr.icmp6->icmp6_cksum, 3103				    &nk->addr[pd->didx], 0); 3104			rewrite++; 3105			break; 3106#endif /* INET6 */ 3107		default: 3108			switch (af) { 3109#ifdef INET 3110			case AF_INET: 3111				if (PF_ANEQ(saddr, 3112				    &nk->addr[pd->sidx], AF_INET)) 3113					pf_change_a(&saddr->v4.s_addr, 3114					    pd->ip_sum, 3115					    nk->addr[pd->sidx].v4.s_addr, 0); 3116 3117				if (PF_ANEQ(daddr, 3118				    &nk->addr[pd->didx], AF_INET)) 3119					pf_change_a(&daddr->v4.s_addr, 3120					    pd->ip_sum, 3121					    nk->addr[pd->didx].v4.s_addr, 0); 3122				break; 3123#endif /* INET */ 3124#ifdef INET6 3125			case AF_INET6: 3126				if (PF_ANEQ(saddr, 3127				    &nk->addr[pd->sidx], AF_INET6)) 3128					PF_ACPY(saddr, &nk->addr[pd->sidx], af); 3129 3130				if (PF_ANEQ(daddr, 3131				    &nk->addr[pd->didx], AF_INET6)) 3132					PF_ACPY(daddr, &nk->addr[pd->didx], af); 3133				break; 3134#endif /* INET6 */ 3135			} 3136			break; 3137		} 3138		if (nr->natpass) 3139			r = NULL; 3140		pd->nat_rule = nr; 3141	} 3142 3143	while (r != NULL) { 3144		r->evaluations++; 3145		if (pfi_kif_match(r->kif, kif) == r->ifnot) 3146			r = r->skip[PF_SKIP_IFP].ptr; 3147		else if (r->direction && r->direction != direction) 3148			r = r->skip[PF_SKIP_DIR].ptr; 3149		else if (r->af && r->af != af) 3150			r = r->skip[PF_SKIP_AF].ptr; 3151		else if (r->proto && r->proto != pd->proto) 3152			r = r->skip[PF_SKIP_PROTO].ptr; 3153		else if (PF_MISMATCHAW(&r->src.addr, saddr, af, 3154		    r->src.neg, kif, M_GETFIB(m))) 3155			r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3156		/* tcp/udp only. port_op always 0 in other cases */ 3157		else if (r->src.port_op && !pf_match_port(r->src.port_op, 3158		    r->src.port[0], r->src.port[1], sport)) 3159			r = r->skip[PF_SKIP_SRC_PORT].ptr; 3160		else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, 3161		    r->dst.neg, NULL, M_GETFIB(m))) 3162			r = r->skip[PF_SKIP_DST_ADDR].ptr; 3163		/* tcp/udp only. port_op always 0 in other cases */ 3164		else if (r->dst.port_op && !pf_match_port(r->dst.port_op, 3165		    r->dst.port[0], r->dst.port[1], dport)) 3166			r = r->skip[PF_SKIP_DST_PORT].ptr; 3167		/* icmp only. type always 0 in other cases */ 3168		else if (r->type && r->type != icmptype + 1) 3169			r = TAILQ_NEXT(r, entries); 3170		/* icmp only. code always 0 in other cases */ 3171		else if (r->code && r->code != icmpcode + 1) 3172			r = TAILQ_NEXT(r, entries); 3173		else if (r->tos && !(r->tos == pd->tos)) 3174			r = TAILQ_NEXT(r, entries); 3175		else if (r->rule_flag & PFRULE_FRAGMENT) 3176			r = TAILQ_NEXT(r, entries); 3177		else if (pd->proto == IPPROTO_TCP && 3178		    (r->flagset & th->th_flags) != r->flags) 3179			r = TAILQ_NEXT(r, entries); 3180		/* tcp/udp only. uid.op always 0 in other cases */ 3181		else if (r->uid.op && (pd->lookup.done || (pd->lookup.done = 3182		    pf_socket_lookup(direction, pd, m), 1)) && 3183		    !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], 3184		    pd->lookup.uid)) 3185			r = TAILQ_NEXT(r, entries); 3186		/* tcp/udp only. gid.op always 0 in other cases */ 3187		else if (r->gid.op && (pd->lookup.done || (pd->lookup.done = 3188		    pf_socket_lookup(direction, pd, m), 1)) && 3189		    !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], 3190		    pd->lookup.gid)) 3191			r = TAILQ_NEXT(r, entries); 3192		else if (r->prob && 3193		    r->prob <= arc4random()) 3194			r = TAILQ_NEXT(r, entries); 3195		else if (r->match_tag && !pf_match_tag(m, r, &tag, 3196		    pd->pf_mtag ?
pd->pf_mtag->tag : 0)) 3197 r = TAILQ_NEXT(r, entries); 3198 else if (r->os_fingerprint != PF_OSFP_ANY && 3199 (pd->proto != IPPROTO_TCP || !pf_osfp_match( 3200 pf_osfp_fingerprint(pd, m, off, th), 3201 r->os_fingerprint))) 3202 r = TAILQ_NEXT(r, entries); 3203 else { 3204 if (r->tag) 3205 tag = r->tag; 3206 if (r->rtableid >= 0) 3207 rtableid = r->rtableid; 3208 if (r->anchor == NULL) { 3209 match = 1; 3210 *rm = r; 3211 *am = a; 3212 *rsm = ruleset; 3213 if ((*rm)->quick) 3214 break; 3215 r = TAILQ_NEXT(r, entries); 3216 } else 3217 pf_step_into_anchor(anchor_stack, &asd, 3218 &ruleset, PF_RULESET_FILTER, &r, &a, 3219 &match); 3220 } 3221 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd, 3222 &ruleset, PF_RULESET_FILTER, &r, &a, &match)) 3223 break; 3224 } 3225 r = *rm; 3226 a = *am; 3227 ruleset = *rsm; 3228 3229 REASON_SET(&reason, PFRES_MATCH); 3230 3231 if (r->log || (nr != NULL && nr->log)) { 3232 if (rewrite) 3233 m_copyback(m, off, hdrlen, pd->hdr.any); 3234 PFLOG_PACKET(kif, m, af, direction, reason, r->log ? r : nr, a, 3235 ruleset, pd, 1); 3236 } 3237 3238 if ((r->action == PF_DROP) && 3239 ((r->rule_flag & PFRULE_RETURNRST) || 3240 (r->rule_flag & PFRULE_RETURNICMP) || 3241 (r->rule_flag & PFRULE_RETURN))) { 3242 /* undo NAT changes, if they have taken place */ 3243 if (nr != NULL) { 3244 PF_ACPY(saddr, &sk->addr[pd->sidx], af); 3245 PF_ACPY(daddr, &sk->addr[pd->didx], af); 3246 if (pd->sport) 3247 *pd->sport = sk->port[pd->sidx]; 3248 if (pd->dport) 3249 *pd->dport = sk->port[pd->didx]; 3250 if (pd->proto_sum) 3251 *pd->proto_sum = bproto_sum; 3252 if (pd->ip_sum) 3253 *pd->ip_sum = bip_sum; 3254 m_copyback(m, off, hdrlen, pd->hdr.any); 3255 } 3256 if (pd->proto == IPPROTO_TCP && 3257 ((r->rule_flag & PFRULE_RETURNRST) || 3258 (r->rule_flag & PFRULE_RETURN)) && 3259 !(th->th_flags & TH_RST)) { 3260 u_int32_t ack = ntohl(th->th_seq) + pd->p_len; 3261 int len = 0; 3262#ifdef INET 3263 struct ip *h4; 3264#endif 3265#ifdef INET6 3266 struct ip6_hdr *h6; 3267#endif 3268 3269 switch (af) { 3270#ifdef INET 3271 case AF_INET: 3272 h4 = mtod(m, struct ip *); 3273 len = ntohs(h4->ip_len) - off; 3274 break; 3275#endif 3276#ifdef INET6 3277 case AF_INET6: 3278 h6 = mtod(m, struct ip6_hdr *); 3279 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6)); 3280 break; 3281#endif 3282 } 3283 3284 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af)) 3285 REASON_SET(&reason, PFRES_PROTCKSUM); 3286 else { 3287 if (th->th_flags & TH_SYN) 3288 ack++; 3289 if (th->th_flags & TH_FIN) 3290 ack++; 3291 pf_send_tcp(m, r, af, pd->dst, 3292 pd->src, th->th_dport, th->th_sport, 3293 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, 3294 r->return_ttl, 1, 0, kif->pfik_ifp); 3295 } 3296 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET && 3297 r->return_icmp) 3298 pf_send_icmp(m, r->return_icmp >> 8, 3299 r->return_icmp & 255, af, r); 3300 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && 3301 r->return_icmp6) 3302 pf_send_icmp(m, r->return_icmp6 >> 8, 3303 r->return_icmp6 & 255, af, r); 3304 } 3305 3306 if (r->action == PF_DROP) 3307 goto cleanup; 3308 3309 if (tag > 0 && pf_tag_packet(m, pd, tag)) { 3310 REASON_SET(&reason, PFRES_MEMORY); 3311 goto cleanup; 3312 } 3313 if (rtableid >= 0) 3314 M_SETFIB(m, rtableid); 3315 3316 if (!state_icmp && (r->keep_state || nr != NULL || 3317 (pd->flags & PFDESC_TCP_NORM))) { 3318 int action; 3319 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off, 3320 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum, 3321 hdrlen); 3322 if (action != 
PF_PASS) 3323			return (action); 3324	} else { 3325		if (sk != NULL) 3326			uma_zfree(V_pf_state_key_z, sk); 3327		if (nk != NULL) 3328			uma_zfree(V_pf_state_key_z, nk); 3329	} 3330 3331	/* copy back packet headers if we performed NAT operations */ 3332	if (rewrite) 3333		m_copyback(m, off, hdrlen, pd->hdr.any); 3334 3335	if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) && 3336	    direction == PF_OUT && 3337	    pfsync_defer_ptr != NULL && pfsync_defer_ptr(*sm, m)) 3338		/* 3339		 * We want the state created, but we don't 3340		 * want to send this in case a partner 3341		 * firewall has to know about it to allow 3342		 * replies through it. 3343		 */ 3344		return (PF_DEFER); 3345 3346	return (PF_PASS); 3347 3348cleanup: 3349	if (sk != NULL) 3350		uma_zfree(V_pf_state_key_z, sk); 3351	if (nk != NULL) 3352		uma_zfree(V_pf_state_key_z, nk); 3353	return (PF_DROP); 3354} 3355 3356static int 3357pf_create_state(struct pf_rule *r, struct pf_rule *nr, struct pf_rule *a, 3358    struct pf_pdesc *pd, struct pf_src_node *nsn, struct pf_state_key *nk, 3359    struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport, 3360    u_int16_t dport, int *rewrite, struct pfi_kif *kif, struct pf_state **sm, 3361    int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen) 3362{ 3363	struct pf_state		*s = NULL; 3364	struct pf_src_node	*sn = NULL; 3365	struct tcphdr		*th = pd->hdr.tcp; 3366	u_int16_t		mss = V_tcp_mssdflt; 3367	u_short			reason; 3368 3369	/* check maximums */ 3370	if (r->max_states && (r->states_cur >= r->max_states)) { 3371		V_pf_status.lcounters[LCNT_STATES]++; 3372		REASON_SET(&reason, PFRES_MAXSTATES); 3373		return (PF_DROP); 3374	} 3375	/* src node for filter rule */ 3376	if ((r->rule_flag & PFRULE_SRCTRACK || 3377	    r->rpool.opts & PF_POOL_STICKYADDR) && 3378	    pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) { 3379		REASON_SET(&reason, PFRES_SRCLIMIT); 3380		goto csfailed; 3381	} 3382	/* src node for translation rule */ 3383	if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) && 3384	    pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) { 3385		REASON_SET(&reason, PFRES_SRCLIMIT); 3386		goto csfailed; 3387	} 3388	s = uma_zalloc(V_pf_state_z, M_NOWAIT | M_ZERO); 3389	if (s == NULL) { 3390		REASON_SET(&reason, PFRES_MEMORY); 3391		goto csfailed; 3392	} 3393	s->rule.ptr = r; 3394	s->nat_rule.ptr = nr; 3395	s->anchor.ptr = a; 3396	STATE_INC_COUNTERS(s); 3397	if (r->allow_opts) 3398		s->state_flags |= PFSTATE_ALLOWOPTS; 3399	if (r->rule_flag & PFRULE_STATESLOPPY) 3400		s->state_flags |= PFSTATE_SLOPPY; 3401	s->log = r->log & PF_LOG_ALL; 3402	s->sync_state = PFSYNC_S_NONE; 3403	if (nr != NULL) 3404		s->log |= nr->log & PF_LOG_ALL; 3405	switch (pd->proto) { 3406	case IPPROTO_TCP: 3407		s->src.seqlo = ntohl(th->th_seq); 3408		s->src.seqhi = s->src.seqlo + pd->p_len + 1; 3409		if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && 3410		    r->keep_state == PF_STATE_MODULATE) { 3411			/* Generate sequence number modulator */ 3412			if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) == 3413			    0) 3414				s->src.seqdiff = 1; 3415			pf_change_a(&th->th_seq, &th->th_sum, 3416			    htonl(s->src.seqlo + s->src.seqdiff), 0); 3417			*rewrite = 1; 3418		} else 3419			s->src.seqdiff = 0; 3420		if (th->th_flags & TH_SYN) { 3421			s->src.seqhi++; 3422			s->src.wscale = pf_get_wscale(m, off, 3423			    th->th_off, pd->af); 3424		} 3425		s->src.max_win = MAX(ntohs(th->th_win), 1); 3426		if (s->src.wscale & PF_WSCALE_MASK) { 3427			/* Remove scale factor from initial window */ 3428			int win = s->src.max_win; 3429			win += 1 << (s->src.wscale & PF_WSCALE_MASK); 3430			s->src.max_win = (win - 1) >>
3431			    (s->src.wscale & PF_WSCALE_MASK); 3432		} 3433		if (th->th_flags & TH_FIN) 3434			s->src.seqhi++; 3435		s->dst.seqhi = 1; 3436		s->dst.max_win = 1; 3437		s->src.state = TCPS_SYN_SENT; 3438		s->dst.state = TCPS_CLOSED; 3439		s->timeout = PFTM_TCP_FIRST_PACKET; 3440		break; 3441	case IPPROTO_UDP: 3442		s->src.state = PFUDPS_SINGLE; 3443		s->dst.state = PFUDPS_NO_TRAFFIC; 3444		s->timeout = PFTM_UDP_FIRST_PACKET; 3445		break; 3446	case IPPROTO_ICMP: 3447#ifdef INET6 3448	case IPPROTO_ICMPV6: 3449#endif 3450		s->timeout = PFTM_ICMP_FIRST_PACKET; 3451		break; 3452	default: 3453		s->src.state = PFOTHERS_SINGLE; 3454		s->dst.state = PFOTHERS_NO_TRAFFIC; 3455		s->timeout = PFTM_OTHER_FIRST_PACKET; 3456	} 3457 3458	s->creation = time_uptime; 3459	s->expire = time_uptime; 3460 3461	if (sn != NULL) { 3462		s->src_node = sn; 3463		s->src_node->states++; 3464	} 3465	if (nsn != NULL) { 3466		/* XXX We only modify one side for now. */ 3467		PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af); 3468		s->nat_src_node = nsn; 3469		s->nat_src_node->states++; 3470	} 3471	if (pd->proto == IPPROTO_TCP) { 3472		if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m, 3473		    off, pd, th, &s->src, &s->dst)) { 3474			REASON_SET(&reason, PFRES_MEMORY); 3475			pf_src_tree_remove_state(s); 3476			STATE_DEC_COUNTERS(s); 3477			uma_zfree(V_pf_state_z, s); 3478			return (PF_DROP); 3479		} 3480		if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && 3481		    pf_normalize_tcp_stateful(m, off, pd, &reason, th, s, 3482		    &s->src, &s->dst, rewrite)) { 3483			/* This really shouldn't happen!!! */ 3484			DPFPRINTF(PF_DEBUG_URGENT, 3485			    ("pf_normalize_tcp_stateful failed on first pkt")); 3486			pf_normalize_tcp_cleanup(s); 3487			pf_src_tree_remove_state(s); 3488			STATE_DEC_COUNTERS(s); 3489			uma_zfree(V_pf_state_z, s); 3490			return (PF_DROP); 3491		} 3492	} 3493	s->direction = pd->dir; 3494 3495	/* 3496	 * sk/nk may already have been set up by pf_get_translation(). 3497	 */ 3498	if (nr == NULL) { 3499		KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p", 3500		    __func__, nr, sk, nk)); 3501		sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport); 3502		if (sk == NULL) 3503			goto csfailed; 3504		nk = sk; 3505	} else 3506		KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p", 3507		    __func__, nr, sk, nk)); 3508 3509	/* Swap sk/nk for PF_OUT. */ 3510	if (pf_state_insert(BOUND_IFACE(r, kif), 3511	    (pd->dir == PF_IN) ? sk : nk, 3512	    (pd->dir == PF_IN) ?
nk : sk, s)) { 3513 if (pd->proto == IPPROTO_TCP) 3514 pf_normalize_tcp_cleanup(s); 3515 REASON_SET(&reason, PFRES_STATEINS); 3516 pf_src_tree_remove_state(s); 3517 STATE_DEC_COUNTERS(s); 3518 uma_zfree(V_pf_state_z, s); 3519 return (PF_DROP); 3520 } else 3521 *sm = s; 3522 3523 pf_set_rt_ifp(s, pd->src); /* needs s->state_key set */ 3524 if (tag > 0) 3525 s->tag = tag; 3526 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) == 3527 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { 3528 s->src.state = PF_TCPS_PROXY_SRC; 3529 /* undo NAT changes, if they have taken place */ 3530 if (nr != NULL) { 3531 struct pf_state_key *skt = s->key[PF_SK_WIRE]; 3532 if (pd->dir == PF_OUT) 3533 skt = s->key[PF_SK_STACK]; 3534 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af); 3535 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af); 3536 if (pd->sport) 3537 *pd->sport = skt->port[pd->sidx]; 3538 if (pd->dport) 3539 *pd->dport = skt->port[pd->didx]; 3540 if (pd->proto_sum) 3541 *pd->proto_sum = bproto_sum; 3542 if (pd->ip_sum) 3543 *pd->ip_sum = bip_sum; 3544 m_copyback(m, off, hdrlen, pd->hdr.any); 3545 } 3546 s->src.seqhi = htonl(arc4random()); 3547 /* Find mss option */ 3548 int rtid = M_GETFIB(m); 3549 mss = pf_get_mss(m, off, th->th_off, pd->af); 3550 mss = pf_calc_mss(pd->src, pd->af, rtid, mss); 3551 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss); 3552 s->src.mss = mss; 3553 pf_send_tcp(NULL, r, pd->af, pd->dst, pd->src, th->th_dport, 3554 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, 3555 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL); 3556 REASON_SET(&reason, PFRES_SYNPROXY); 3557 return (PF_SYNPROXY_DROP); 3558 } 3559 3560 return (PF_PASS); 3561 3562csfailed: 3563 if (sk != NULL) 3564 uma_zfree(V_pf_state_key_z, sk); 3565 if (nk != NULL) 3566 uma_zfree(V_pf_state_key_z, nk); 3567 3568 if (sn != NULL && sn->states == 0 && sn->expire == 0) 3569 pf_remove_src_node(sn); 3570 3571 if (nsn != sn && nsn != NULL && nsn->states == 0 && nsn->expire == 0) 3572 pf_remove_src_node(nsn); 3573 3574 return (PF_DROP); 3575} 3576 3577static int 3578pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, 3579 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_rule **am, 3580 struct pf_ruleset **rsm) 3581{ 3582 struct pf_rule *r, *a = NULL; 3583 struct pf_ruleset *ruleset = NULL; 3584 sa_family_t af = pd->af; 3585 u_short reason; 3586 int tag = -1; 3587 int asd = 0; 3588 int match = 0; 3589 struct pf_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE]; 3590 3591 PF_RULES_RASSERT(); 3592 3593 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); 3594 while (r != NULL) { 3595 r->evaluations++; 3596 if (pfi_kif_match(r->kif, kif) == r->ifnot) 3597 r = r->skip[PF_SKIP_IFP].ptr; 3598 else if (r->direction && r->direction != direction) 3599 r = r->skip[PF_SKIP_DIR].ptr; 3600 else if (r->af && r->af != af) 3601 r = r->skip[PF_SKIP_AF].ptr; 3602 else if (r->proto && r->proto != pd->proto) 3603 r = r->skip[PF_SKIP_PROTO].ptr; 3604 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, 3605 r->src.neg, kif, M_GETFIB(m))) 3606 r = r->skip[PF_SKIP_SRC_ADDR].ptr; 3607 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, 3608 r->dst.neg, NULL, M_GETFIB(m))) 3609 r = r->skip[PF_SKIP_DST_ADDR].ptr; 3610 else if (r->tos && !(r->tos == pd->tos)) 3611 r = TAILQ_NEXT(r, entries); 3612 else if (r->os_fingerprint != PF_OSFP_ANY) 3613 r = TAILQ_NEXT(r, entries); 3614 else if (pd->proto == IPPROTO_UDP && 3615 (r->src.port_op || r->dst.port_op)) 3616 r = TAILQ_NEXT(r, entries); 3617 else if (pd->proto == 
IPPROTO_TCP && 3618 (r->src.port_op || r->dst.port_op || r->flagset)) 3619 r = TAILQ_NEXT(r, entries); 3620 else if ((pd->proto == IPPROTO_ICMP || 3621 pd->proto == IPPROTO_ICMPV6) && 3622 (r->type || r->code)) 3623 r = TAILQ_NEXT(r, entries); 3624 else if (r->prob && r->prob <= 3625 (arc4random() % (UINT_MAX - 1) + 1)) 3626 r = TAILQ_NEXT(r, entries); 3627 else if (r->match_tag && !pf_match_tag(m, r, &tag, 3628 pd->pf_mtag ? pd->pf_mtag->tag : 0)) 3629 r = TAILQ_NEXT(r, entries); 3630 else { 3631 if (r->anchor == NULL) { 3632 match = 1; 3633 *rm = r; 3634 *am = a; 3635 *rsm = ruleset; 3636 if ((*rm)->quick) 3637 break; 3638 r = TAILQ_NEXT(r, entries); 3639 } else 3640 pf_step_into_anchor(anchor_stack, &asd, 3641 &ruleset, PF_RULESET_FILTER, &r, &a, 3642 &match); 3643 } 3644 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd, 3645 &ruleset, PF_RULESET_FILTER, &r, &a, &match)) 3646 break; 3647 } 3648 r = *rm; 3649 a = *am; 3650 ruleset = *rsm; 3651 3652 REASON_SET(&reason, PFRES_MATCH); 3653 3654 if (r->log) 3655 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd, 3656 1); 3657 3658 if (r->action != PF_PASS) 3659 return (PF_DROP); 3660 3661 if (tag > 0 && pf_tag_packet(m, pd, tag)) { 3662 REASON_SET(&reason, PFRES_MEMORY); 3663 return (PF_DROP); 3664 } 3665 3666 return (PF_PASS); 3667} 3668 3669static int 3670pf_tcp_track_full(struct pf_state_peer *src, struct pf_state_peer *dst, 3671 struct pf_state **state, struct pfi_kif *kif, struct mbuf *m, int off, 3672 struct pf_pdesc *pd, u_short *reason, int *copyback) 3673{ 3674 struct tcphdr *th = pd->hdr.tcp; 3675 u_int16_t win = ntohs(th->th_win); 3676 u_int32_t ack, end, seq, orig_seq; 3677 u_int8_t sws, dws; 3678 int ackskew; 3679 3680 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) { 3681 sws = src->wscale & PF_WSCALE_MASK; 3682 dws = dst->wscale & PF_WSCALE_MASK; 3683 } else 3684 sws = dws = 0; 3685 3686 /* 3687 * Sequence tracking algorithm from Guido van Rooij's paper: 3688 * http://www.madison-gurkha.com/publications/tcp_filtering/ 3689 * tcp_filtering.ps 3690 */ 3691 3692 orig_seq = seq = ntohl(th->th_seq); 3693 if (src->seqlo == 0) { 3694 /* First packet from this end. 
Set its state */ 3695 3696		if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) && 3697		    src->scrub == NULL) { 3698			if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) { 3699				REASON_SET(reason, PFRES_MEMORY); 3700				return (PF_DROP); 3701			} 3702		} 3703 3704		/* Deferred generation of sequence number modulator */ 3705		if (dst->seqdiff && !src->seqdiff) { 3706			/* use random iss for the TCP server */ 3707			while ((src->seqdiff = arc4random() - seq) == 0) 3708				; 3709			ack = ntohl(th->th_ack) - dst->seqdiff; 3710			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3711			    src->seqdiff), 0); 3712			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3713			*copyback = 1; 3714		} else { 3715			ack = ntohl(th->th_ack); 3716		} 3717 3718		end = seq + pd->p_len; 3719		if (th->th_flags & TH_SYN) { 3720			end++; 3721			if (dst->wscale & PF_WSCALE_FLAG) { 3722				src->wscale = pf_get_wscale(m, off, th->th_off, 3723				    pd->af); 3724				if (src->wscale & PF_WSCALE_FLAG) { 3725					/* Remove scale factor from initial 3726					 * window */ 3727					sws = src->wscale & PF_WSCALE_MASK; 3728					win = ((u_int32_t)win + (1 << sws) - 1) 3729					    >> sws; 3730					dws = dst->wscale & PF_WSCALE_MASK; 3731				} else { 3732					/* fixup other window */ 3733					dst->max_win <<= dst->wscale & 3734					    PF_WSCALE_MASK; 3735					/* in case of a retrans SYN|ACK */ 3736					dst->wscale = 0; 3737				} 3738			} 3739		} 3740		if (th->th_flags & TH_FIN) 3741			end++; 3742 3743		src->seqlo = seq; 3744		if (src->state < TCPS_SYN_SENT) 3745			src->state = TCPS_SYN_SENT; 3746 3747		/* 3748		 * May need to slide the window (seqhi may have been set by 3749		 * the crappy stack check or if we picked up the connection 3750		 * after establishment) 3751		 */ 3752		if (src->seqhi == 1 || 3753		    SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi)) 3754			src->seqhi = end + MAX(1, dst->max_win << dws); 3755		if (win > src->max_win) 3756			src->max_win = win; 3757 3758	} else { 3759		ack = ntohl(th->th_ack) - dst->seqdiff; 3760		if (src->seqdiff) { 3761			/* Modulate sequence numbers */ 3762			pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + 3763			    src->seqdiff), 0); 3764			pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); 3765			*copyback = 1; 3766		} 3767		end = seq + pd->p_len; 3768		if (th->th_flags & TH_SYN) 3769			end++; 3770		if (th->th_flags & TH_FIN) 3771			end++; 3772	} 3773 3774	if ((th->th_flags & TH_ACK) == 0) { 3775		/* Let it pass through the ack skew check */ 3776		ack = dst->seqlo; 3777	} else if ((ack == 0 && 3778	    (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || 3779	    /* broken tcp stacks do not set ack */ 3780	    (dst->state < TCPS_SYN_SENT)) { 3781		/* 3782		 * Many stacks (ours included) will set the ACK number in a 3783		 * FIN|ACK if the SYN times out -- no sequence to ACK. 3784		 */ 3785		ack = dst->seqlo; 3786	} 3787 3788	if (seq == end) { 3789		/* Ease sequencing restrictions on no-data packets */ 3790		seq = src->seqlo; 3791		end = seq; 3792	} 3793 3794	ackskew = dst->seqlo - ack; 3795 3796 3797	/* 3798	 * Need to demodulate the sequence numbers in any TCP SACK options 3799	 * (Selective ACK). We could optionally validate the SACK values 3800	 * against the current ACK window, either forwards or backwards, but 3801	 * I'm not confident that SACK has been implemented properly 3802	 * everywhere. It wouldn't surprise me if several stacks accidentally 3803	 * SACK too far backwards of previously ACKed data. There really aren't 3804	 * any security implications of bad SACKing unless the target stack 3805	 * doesn't validate the option length correctly.
Someone trying to 3806 * spoof into a TCP connection won't bother blindly sending SACK 3807 * options anyway. 3808 */ 3809 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) { 3810 if (pf_modulate_sack(m, off, pd, th, dst)) 3811 *copyback = 1; 3812 } 3813 3814 3815#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ 3816 if (SEQ_GEQ(src->seqhi, end) && 3817 /* Last octet inside other's window space */ 3818 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) && 3819 /* Retrans: not more than one window back */ 3820 (ackskew >= -MAXACKWINDOW) && 3821 /* Acking not more than one reassembled fragment backwards */ 3822 (ackskew <= (MAXACKWINDOW << sws)) && 3823 /* Acking not more than one window forward */ 3824 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || 3825 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || 3826 (pd->flags & PFDESC_IP_REAS) == 0)) { 3827 /* Require an exact/+1 sequence match on resets when possible */ 3828 3829 if (dst->scrub || src->scrub) { 3830 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 3831 *state, src, dst, copyback)) 3832 return (PF_DROP); 3833 } 3834 3835 /* update max window */ 3836 if (src->max_win < win) 3837 src->max_win = win; 3838 /* synchronize sequencing */ 3839 if (SEQ_GT(end, src->seqlo)) 3840 src->seqlo = end; 3841 /* slide the window of what the other end can send */ 3842 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 3843 dst->seqhi = ack + MAX((win << sws), 1); 3844 3845 3846 /* update states */ 3847 if (th->th_flags & TH_SYN) 3848 if (src->state < TCPS_SYN_SENT) 3849 src->state = TCPS_SYN_SENT; 3850 if (th->th_flags & TH_FIN) 3851 if (src->state < TCPS_CLOSING) 3852 src->state = TCPS_CLOSING; 3853 if (th->th_flags & TH_ACK) { 3854 if (dst->state == TCPS_SYN_SENT) { 3855 dst->state = TCPS_ESTABLISHED; 3856 if (src->state == TCPS_ESTABLISHED && 3857 (*state)->src_node != NULL && 3858 pf_src_connlimit(state)) { 3859 REASON_SET(reason, PFRES_SRCLIMIT); 3860 return (PF_DROP); 3861 } 3862 } else if (dst->state == TCPS_CLOSING) 3863 dst->state = TCPS_FIN_WAIT_2; 3864 } 3865 if (th->th_flags & TH_RST) 3866 src->state = dst->state = TCPS_TIME_WAIT; 3867 3868 /* update expire time */ 3869 (*state)->expire = time_uptime; 3870 if (src->state >= TCPS_FIN_WAIT_2 && 3871 dst->state >= TCPS_FIN_WAIT_2) 3872 (*state)->timeout = PFTM_TCP_CLOSED; 3873 else if (src->state >= TCPS_CLOSING && 3874 dst->state >= TCPS_CLOSING) 3875 (*state)->timeout = PFTM_TCP_FIN_WAIT; 3876 else if (src->state < TCPS_ESTABLISHED || 3877 dst->state < TCPS_ESTABLISHED) 3878 (*state)->timeout = PFTM_TCP_OPENING; 3879 else if (src->state >= TCPS_CLOSING || 3880 dst->state >= TCPS_CLOSING) 3881 (*state)->timeout = PFTM_TCP_CLOSING; 3882 else 3883 (*state)->timeout = PFTM_TCP_ESTABLISHED; 3884 3885 /* Fall through to PASS packet */ 3886 3887 } else if ((dst->state < TCPS_SYN_SENT || 3888 dst->state >= TCPS_FIN_WAIT_2 || 3889 src->state >= TCPS_FIN_WAIT_2) && 3890 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && 3891 /* Within a window forward of the originating packet */ 3892 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { 3893 /* Within a window backward of the originating packet */ 3894 3895 /* 3896 * This currently handles three situations: 3897 * 1) Stupid stacks will shotgun SYNs before their peer 3898 * replies. 3899 * 2) When PF catches an already established stream (the 3900 * firewall rebooted, the state table was flushed, routes 3901 * changed...) 
3902 * 3) Packets get funky immediately after the connection 3903 * closes (this should catch Solaris spurious ACK|FINs 3904 * that web servers like to spew after a close) 3905 * 3906 * This must be a little more careful than the above code 3907 * since packet floods will also be caught here. We don't 3908 * update the TTL here to mitigate the damage of a packet 3909 * flood and so the same code can handle awkward establishment 3910 * and a loosened connection close. 3911 * In the establishment case, a correct peer response will 3912 * validate the connection, go through the normal state code 3913 * and keep updating the state TTL. 3914 */ 3915 3916 if (V_pf_status.debug >= PF_DEBUG_MISC) { 3917 printf("pf: loose state match: "); 3918 pf_print_state(*state); 3919 pf_print_flags(th->th_flags); 3920 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 3921 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack, 3922 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0], 3923 (unsigned long long)(*state)->packets[1], 3924 pd->dir == PF_IN ? "in" : "out", 3925 pd->dir == (*state)->direction ? "fwd" : "rev"); 3926 } 3927 3928 if (dst->scrub || src->scrub) { 3929 if (pf_normalize_tcp_stateful(m, off, pd, reason, th, 3930 *state, src, dst, copyback)) 3931 return (PF_DROP); 3932 } 3933 3934 /* update max window */ 3935 if (src->max_win < win) 3936 src->max_win = win; 3937 /* synchronize sequencing */ 3938 if (SEQ_GT(end, src->seqlo)) 3939 src->seqlo = end; 3940 /* slide the window of what the other end can send */ 3941 if (SEQ_GEQ(ack + (win << sws), dst->seqhi)) 3942 dst->seqhi = ack + MAX((win << sws), 1); 3943 3944 /* 3945 * Cannot set dst->seqhi here since this could be a shotgunned 3946 * SYN and not an already established connection. 3947 */ 3948 3949 if (th->th_flags & TH_FIN) 3950 if (src->state < TCPS_CLOSING) 3951 src->state = TCPS_CLOSING; 3952 if (th->th_flags & TH_RST) 3953 src->state = dst->state = TCPS_TIME_WAIT; 3954 3955 /* Fall through to PASS packet */ 3956 3957 } else { 3958 if ((*state)->dst.state == TCPS_SYN_SENT && 3959 (*state)->src.state == TCPS_SYN_SENT) { 3960 /* Send RST for state mismatches during handshake */ 3961 if (!(th->th_flags & TH_RST)) 3962 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, 3963 pd->dst, pd->src, th->th_dport, 3964 th->th_sport, ntohl(th->th_ack), 0, 3965 TH_RST, 0, 0, 3966 (*state)->rule.ptr->return_ttl, 1, 0, 3967 kif->pfik_ifp); 3968 src->seqlo = 0; 3969 src->seqhi = 1; 3970 src->max_win = 1; 3971 } else if (V_pf_status.debug >= PF_DEBUG_MISC) { 3972 printf("pf: BAD state: "); 3973 pf_print_state(*state); 3974 pf_print_flags(th->th_flags); 3975 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d " 3976 "pkts=%llu:%llu dir=%s,%s\n", 3977 seq, orig_seq, ack, pd->p_len, ackskew, 3978 (unsigned long long)(*state)->packets[0], 3979 (unsigned long long)(*state)->packets[1], 3980 pd->dir == PF_IN ? "in" : "out", 3981 pd->dir == (*state)->direction ? "fwd" : "rev"); 3982 printf("pf: State failure on: %c %c %c %c | %c %c\n", 3983 SEQ_GEQ(src->seqhi, end) ? ' ' : '1', 3984 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ? 3985 ' ': '2', 3986 (ackskew >= -MAXACKWINDOW) ? ' ' : '3', 3987 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4', 3988 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5', 3989 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' 
' :'6'); 3990		} 3991		REASON_SET(reason, PFRES_BADSTATE); 3992		return (PF_DROP); 3993	} 3994 3995	return (PF_PASS); 3996} 3997 3998static int 3999pf_tcp_track_sloppy(struct pf_state_peer *src, struct pf_state_peer *dst, 4000	struct pf_state **state, struct pf_pdesc *pd, u_short *reason) 4001{ 4002	struct tcphdr *th = pd->hdr.tcp; 4003 4004	if (th->th_flags & TH_SYN) 4005		if (src->state < TCPS_SYN_SENT) 4006			src->state = TCPS_SYN_SENT; 4007	if (th->th_flags & TH_FIN) 4008		if (src->state < TCPS_CLOSING) 4009			src->state = TCPS_CLOSING; 4010	if (th->th_flags & TH_ACK) { 4011		if (dst->state == TCPS_SYN_SENT) { 4012			dst->state = TCPS_ESTABLISHED; 4013			if (src->state == TCPS_ESTABLISHED && 4014			    (*state)->src_node != NULL && 4015			    pf_src_connlimit(state)) { 4016				REASON_SET(reason, PFRES_SRCLIMIT); 4017				return (PF_DROP); 4018			} 4019		} else if (dst->state == TCPS_CLOSING) { 4020			dst->state = TCPS_FIN_WAIT_2; 4021		} else if (src->state == TCPS_SYN_SENT && 4022		    dst->state < TCPS_SYN_SENT) { 4023			/* 4024			 * Handle a special sloppy case where we only see one 4025			 * half of the connection. If there is an ACK after 4026			 * the initial SYN without ever seeing a packet from 4027			 * the destination, set the connection to established. 4028			 */ 4029			dst->state = src->state = TCPS_ESTABLISHED; 4030			if ((*state)->src_node != NULL && 4031			    pf_src_connlimit(state)) { 4032				REASON_SET(reason, PFRES_SRCLIMIT); 4033				return (PF_DROP); 4034			} 4035		} else if (src->state == TCPS_CLOSING && 4036		    dst->state == TCPS_ESTABLISHED && 4037		    dst->seqlo == 0) { 4038			/* 4039			 * Handle the closing of half connections where we 4040			 * don't see the full bidirectional FIN/ACK+ACK 4041			 * handshake. 4042			 */ 4043			dst->state = TCPS_CLOSING; 4044		} 4045	} 4046	if (th->th_flags & TH_RST) 4047		src->state = dst->state = TCPS_TIME_WAIT; 4048 4049	/* update expire time */ 4050	(*state)->expire = time_uptime; 4051	if (src->state >= TCPS_FIN_WAIT_2 && 4052	    dst->state >= TCPS_FIN_WAIT_2) 4053		(*state)->timeout = PFTM_TCP_CLOSED; 4054	else if (src->state >= TCPS_CLOSING && 4055	    dst->state >= TCPS_CLOSING) 4056		(*state)->timeout = PFTM_TCP_FIN_WAIT; 4057	else if (src->state < TCPS_ESTABLISHED || 4058	    dst->state < TCPS_ESTABLISHED) 4059		(*state)->timeout = PFTM_TCP_OPENING; 4060	else if (src->state >= TCPS_CLOSING || 4061	    dst->state >= TCPS_CLOSING) 4062		(*state)->timeout = PFTM_TCP_CLOSING; 4063	else 4064		(*state)->timeout = PFTM_TCP_ESTABLISHED; 4065 4066	return (PF_PASS); 4067} 4068 4069static int 4070pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, 4071    struct mbuf *m, int off, void *h, struct pf_pdesc *pd, 4072    u_short *reason) 4073{ 4074	struct pf_state_key_cmp	 key; 4075	struct tcphdr		*th = pd->hdr.tcp; 4076	int			 copyback = 0; 4077	struct pf_state_peer	*src, *dst; 4078	struct pf_state_key	*sk; 4079 4080	bzero(&key, sizeof(key)); 4081	key.af = pd->af; 4082	key.proto = IPPROTO_TCP; 4083	if (direction == PF_IN)	{	/* wire side, straight */ 4084		PF_ACPY(&key.addr[0], pd->src, key.af); 4085		PF_ACPY(&key.addr[1], pd->dst, key.af); 4086		key.port[0] = th->th_sport; 4087		key.port[1] = th->th_dport; 4088	} else {			/* stack side, reverse */ 4089		PF_ACPY(&key.addr[1], pd->src, key.af); 4090		PF_ACPY(&key.addr[0], pd->dst, key.af); 4091		key.port[1] = th->th_sport; 4092		key.port[0] = th->th_dport; 4093	} 4094 4095	STATE_LOOKUP(kif, &key, direction, *state, pd); 4096 4097	if (direction == (*state)->direction) { 4098		src = &(*state)->src; 4099		dst = &(*state)->dst; 4100	} else { 4101		src = &(*state)->dst; 4102		dst = &(*state)->src; 4103	} 4104
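	/*
	 * key[] is indexed by PF_SK_WIRE (0) and PF_SK_STACK (1), while
	 * pd->didx is 1 on PF_IN and 0 on PF_OUT, so sk below resolves to
	 * the stack-side key for inbound packets and the wire-side key
	 * for outbound ones; the synproxy code further down forges its
	 * handshake segments from it.
	 */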
4105 sk = (*state)->key[pd->didx]; 4106 4107 if ((*state)->src.state == PF_TCPS_PROXY_SRC) { 4108 if (direction != (*state)->direction) { 4109 REASON_SET(reason, PFRES_SYNPROXY); 4110 return (PF_SYNPROXY_DROP); 4111 } 4112 if (th->th_flags & TH_SYN) { 4113 if (ntohl(th->th_seq) != (*state)->src.seqlo) { 4114 REASON_SET(reason, PFRES_SYNPROXY); 4115 return (PF_DROP); 4116 } 4117 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst, 4118 pd->src, th->th_dport, th->th_sport, 4119 (*state)->src.seqhi, ntohl(th->th_seq) + 1, 4120 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0, NULL); 4121 REASON_SET(reason, PFRES_SYNPROXY); 4122 return (PF_SYNPROXY_DROP); 4123 } else if (!(th->th_flags & TH_ACK) || 4124 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4125 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4126 REASON_SET(reason, PFRES_SYNPROXY); 4127 return (PF_DROP); 4128 } else if ((*state)->src_node != NULL && 4129 pf_src_connlimit(state)) { 4130 REASON_SET(reason, PFRES_SRCLIMIT); 4131 return (PF_DROP); 4132 } else 4133 (*state)->src.state = PF_TCPS_PROXY_DST; 4134 } 4135 if ((*state)->src.state == PF_TCPS_PROXY_DST) { 4136 if (direction == (*state)->direction) { 4137 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || 4138 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) || 4139 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) { 4140 REASON_SET(reason, PFRES_SYNPROXY); 4141 return (PF_DROP); 4142 } 4143 (*state)->src.max_win = MAX(ntohs(th->th_win), 1); 4144 if ((*state)->dst.seqhi == 1) 4145 (*state)->dst.seqhi = htonl(arc4random()); 4146 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, 4147 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4148 sk->port[pd->sidx], sk->port[pd->didx], 4149 (*state)->dst.seqhi, 0, TH_SYN, 0, 4150 (*state)->src.mss, 0, 0, (*state)->tag, NULL); 4151 REASON_SET(reason, PFRES_SYNPROXY); 4152 return (PF_SYNPROXY_DROP); 4153 } else if (((th->th_flags & (TH_SYN|TH_ACK)) != 4154 (TH_SYN|TH_ACK)) || 4155 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) { 4156 REASON_SET(reason, PFRES_SYNPROXY); 4157 return (PF_DROP); 4158 } else { 4159 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1); 4160 (*state)->dst.seqlo = ntohl(th->th_seq); 4161 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, pd->dst, 4162 pd->src, th->th_dport, th->th_sport, 4163 ntohl(th->th_ack), ntohl(th->th_seq) + 1, 4164 TH_ACK, (*state)->src.max_win, 0, 0, 0, 4165 (*state)->tag, NULL); 4166 pf_send_tcp(NULL, (*state)->rule.ptr, pd->af, 4167 &sk->addr[pd->sidx], &sk->addr[pd->didx], 4168 sk->port[pd->sidx], sk->port[pd->didx], 4169 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1, 4170 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0, NULL); 4171 (*state)->src.seqdiff = (*state)->dst.seqhi - 4172 (*state)->src.seqlo; 4173 (*state)->dst.seqdiff = (*state)->src.seqhi - 4174 (*state)->dst.seqlo; 4175 (*state)->src.seqhi = (*state)->src.seqlo + 4176 (*state)->dst.max_win; 4177 (*state)->dst.seqhi = (*state)->dst.seqlo + 4178 (*state)->src.max_win; 4179 (*state)->src.wscale = (*state)->dst.wscale = 0; 4180 (*state)->src.state = (*state)->dst.state = 4181 TCPS_ESTABLISHED; 4182 REASON_SET(reason, PFRES_SYNPROXY); 4183 return (PF_SYNPROXY_DROP); 4184 } 4185 } 4186 4187 if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && 4188 dst->state >= TCPS_FIN_WAIT_2 && 4189 src->state >= TCPS_FIN_WAIT_2) { 4190 if (V_pf_status.debug >= PF_DEBUG_MISC) { 4191 printf("pf: state reuse "); 4192 pf_print_state(*state); 4193 pf_print_flags(th->th_flags); 4194 printf("\n"); 4195 } 4196 /* XXX make sure it's the same direction ?? 
*/ 4197 (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; 4198 pf_unlink_state(*state, PF_ENTER_LOCKED); 4199 *state = NULL; 4200 return (PF_DROP); 4201 } 4202 4203 if ((*state)->state_flags & PFSTATE_SLOPPY) { 4204 if (pf_tcp_track_sloppy(src, dst, state, pd, reason) == PF_DROP) 4205 return (PF_DROP); 4206 } else { 4207 if (pf_tcp_track_full(src, dst, state, kif, m, off, pd, reason, 4208 &copyback) == PF_DROP) 4209 return (PF_DROP); 4210 } 4211 4212 /* translate source/destination address, if necessary */ 4213 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4214 struct pf_state_key *nk = (*state)->key[pd->didx]; 4215 4216 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4217 nk->port[pd->sidx] != th->th_sport) 4218 pf_change_ap(pd->src, &th->th_sport, pd->ip_sum, 4219 &th->th_sum, &nk->addr[pd->sidx], 4220 nk->port[pd->sidx], 0, pd->af); 4221 4222 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4223 nk->port[pd->didx] != th->th_dport) 4224 pf_change_ap(pd->dst, &th->th_dport, pd->ip_sum, 4225 &th->th_sum, &nk->addr[pd->didx], 4226 nk->port[pd->didx], 0, pd->af); 4227 copyback = 1; 4228 } 4229 4230 /* Copyback sequence modulation or stateful scrub changes if needed */ 4231 if (copyback) 4232 m_copyback(m, off, sizeof(*th), (caddr_t)th); 4233 4234 return (PF_PASS); 4235} 4236 4237static int 4238pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, 4239 struct mbuf *m, int off, void *h, struct pf_pdesc *pd) 4240{ 4241 struct pf_state_peer *src, *dst; 4242 struct pf_state_key_cmp key; 4243 struct udphdr *uh = pd->hdr.udp; 4244 4245 bzero(&key, sizeof(key)); 4246 key.af = pd->af; 4247 key.proto = IPPROTO_UDP; 4248 if (direction == PF_IN) { /* wire side, straight */ 4249 PF_ACPY(&key.addr[0], pd->src, key.af); 4250 PF_ACPY(&key.addr[1], pd->dst, key.af); 4251 key.port[0] = uh->uh_sport; 4252 key.port[1] = uh->uh_dport; 4253 } else { /* stack side, reverse */ 4254 PF_ACPY(&key.addr[1], pd->src, key.af); 4255 PF_ACPY(&key.addr[0], pd->dst, key.af); 4256 key.port[1] = uh->uh_sport; 4257 key.port[0] = uh->uh_dport; 4258 } 4259 4260 STATE_LOOKUP(kif, &key, direction, *state, pd); 4261 4262 if (direction == (*state)->direction) { 4263 src = &(*state)->src; 4264 dst = &(*state)->dst; 4265 } else { 4266 src = &(*state)->dst; 4267 dst = &(*state)->src; 4268 } 4269 4270 /* update states */ 4271 if (src->state < PFUDPS_SINGLE) 4272 src->state = PFUDPS_SINGLE; 4273 if (dst->state == PFUDPS_SINGLE) 4274 dst->state = PFUDPS_MULTIPLE; 4275 4276 /* update expire time */ 4277 (*state)->expire = time_uptime; 4278 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) 4279 (*state)->timeout = PFTM_UDP_MULTIPLE; 4280 else 4281 (*state)->timeout = PFTM_UDP_SINGLE; 4282 4283 /* translate source/destination address, if necessary */ 4284 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4285 struct pf_state_key *nk = (*state)->key[pd->didx]; 4286 4287 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) || 4288 nk->port[pd->sidx] != uh->uh_sport) 4289 pf_change_ap(pd->src, &uh->uh_sport, pd->ip_sum, 4290 &uh->uh_sum, &nk->addr[pd->sidx], 4291 nk->port[pd->sidx], 1, pd->af); 4292 4293 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) || 4294 nk->port[pd->didx] != uh->uh_dport) 4295 pf_change_ap(pd->dst, &uh->uh_dport, pd->ip_sum, 4296 &uh->uh_sum, &nk->addr[pd->didx], 4297 nk->port[pd->didx], 1, pd->af); 4298 m_copyback(m, off, sizeof(*uh), (caddr_t)uh); 4299 } 4300 4301 return (PF_PASS); 4302} 4303 4304static int 4305pf_test_state_icmp(struct 
pf_state **state, int direction, struct pfi_kif *kif, 4306 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason) 4307{ 4308 struct pf_addr *saddr = pd->src, *daddr = pd->dst; 4309 u_int16_t icmpid = 0, *icmpsum; 4310 u_int8_t icmptype; 4311 int state_icmp = 0; 4312 struct pf_state_key_cmp key; 4313 4314 bzero(&key, sizeof(key)); 4315 switch (pd->proto) { 4316#ifdef INET 4317 case IPPROTO_ICMP: 4318 icmptype = pd->hdr.icmp->icmp_type; 4319 icmpid = pd->hdr.icmp->icmp_id; 4320 icmpsum = &pd->hdr.icmp->icmp_cksum; 4321 4322 if (icmptype == ICMP_UNREACH || 4323 icmptype == ICMP_SOURCEQUENCH || 4324 icmptype == ICMP_REDIRECT || 4325 icmptype == ICMP_TIMXCEED || 4326 icmptype == ICMP_PARAMPROB) 4327 state_icmp++; 4328 break; 4329#endif /* INET */ 4330#ifdef INET6 4331 case IPPROTO_ICMPV6: 4332 icmptype = pd->hdr.icmp6->icmp6_type; 4333 icmpid = pd->hdr.icmp6->icmp6_id; 4334 icmpsum = &pd->hdr.icmp6->icmp6_cksum; 4335 4336 if (icmptype == ICMP6_DST_UNREACH || 4337 icmptype == ICMP6_PACKET_TOO_BIG || 4338 icmptype == ICMP6_TIME_EXCEEDED || 4339 icmptype == ICMP6_PARAM_PROB) 4340 state_icmp++; 4341 break; 4342#endif /* INET6 */ 4343 } 4344 4345 if (!state_icmp) { 4346 4347 /* 4348 * ICMP query/reply message not related to a TCP/UDP packet. 4349 * Search for an ICMP state. 4350 */ 4351 key.af = pd->af; 4352 key.proto = pd->proto; 4353 key.port[0] = key.port[1] = icmpid; 4354 if (direction == PF_IN) { /* wire side, straight */ 4355 PF_ACPY(&key.addr[0], pd->src, key.af); 4356 PF_ACPY(&key.addr[1], pd->dst, key.af); 4357 } else { /* stack side, reverse */ 4358 PF_ACPY(&key.addr[1], pd->src, key.af); 4359 PF_ACPY(&key.addr[0], pd->dst, key.af); 4360 } 4361 4362 STATE_LOOKUP(kif, &key, direction, *state, pd); 4363 4364 (*state)->expire = time_uptime; 4365 (*state)->timeout = PFTM_ICMP_ERROR_REPLY; 4366 4367 /* translate source/destination address, if necessary */ 4368 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4369 struct pf_state_key *nk = (*state)->key[pd->didx]; 4370 4371 switch (pd->af) { 4372#ifdef INET 4373 case AF_INET: 4374 if (PF_ANEQ(pd->src, 4375 &nk->addr[pd->sidx], AF_INET)) 4376 pf_change_a(&saddr->v4.s_addr, 4377 pd->ip_sum, 4378 nk->addr[pd->sidx].v4.s_addr, 0); 4379 4380 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], 4381 AF_INET)) 4382 pf_change_a(&daddr->v4.s_addr, 4383 pd->ip_sum, 4384 nk->addr[pd->didx].v4.s_addr, 0); 4385 4386 if (nk->port[0] != 4387 pd->hdr.icmp->icmp_id) { 4388 pd->hdr.icmp->icmp_cksum = 4389 pf_cksum_fixup( 4390 pd->hdr.icmp->icmp_cksum, icmpid, 4391 nk->port[pd->sidx], 0); 4392 pd->hdr.icmp->icmp_id = 4393 nk->port[pd->sidx]; 4394 } 4395 4396 m_copyback(m, off, ICMP_MINLEN, 4397 (caddr_t )pd->hdr.icmp); 4398 break; 4399#endif /* INET */ 4400#ifdef INET6 4401 case AF_INET6: 4402 if (PF_ANEQ(pd->src, 4403 &nk->addr[pd->sidx], AF_INET6)) 4404 pf_change_a6(saddr, 4405 &pd->hdr.icmp6->icmp6_cksum, 4406 &nk->addr[pd->sidx], 0); 4407 4408 if (PF_ANEQ(pd->dst, 4409 &nk->addr[pd->didx], AF_INET6)) 4410 pf_change_a6(daddr, 4411 &pd->hdr.icmp6->icmp6_cksum, 4412 &nk->addr[pd->didx], 0); 4413 4414 m_copyback(m, off, sizeof(struct icmp6_hdr), 4415 (caddr_t )pd->hdr.icmp6); 4416 break; 4417#endif /* INET6 */ 4418 } 4419 } 4420 return (PF_PASS); 4421 4422 } else { 4423 /* 4424 * ICMP error message in response to a TCP/UDP packet. 4425 * Extract the inner TCP/UDP header and search for that state. 
4426 */ 4427 4428 struct pf_pdesc pd2; 4429 bzero(&pd2, sizeof pd2); 4430#ifdef INET 4431 struct ip h2; 4432#endif /* INET */ 4433#ifdef INET6 4434 struct ip6_hdr h2_6; 4435 int terminal = 0; 4436#endif /* INET6 */ 4437 int ipoff2 = 0; 4438 int off2 = 0; 4439 4440 pd2.af = pd->af; 4441 /* Payload packet is from the opposite direction. */ 4442 pd2.sidx = (direction == PF_IN) ? 1 : 0; 4443 pd2.didx = (direction == PF_IN) ? 0 : 1; 4444 switch (pd->af) { 4445#ifdef INET 4446 case AF_INET: 4447 /* offset of h2 in mbuf chain */ 4448 ipoff2 = off + ICMP_MINLEN; 4449 4450 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2), 4451 NULL, reason, pd2.af)) { 4452 DPFPRINTF(PF_DEBUG_MISC, 4453 ("pf: ICMP error message too short " 4454 "(ip)\n")); 4455 return (PF_DROP); 4456 } 4457 /* 4458 * ICMP error messages don't refer to non-first 4459 * fragments 4460 */ 4461 if (h2.ip_off & htons(IP_OFFMASK)) { 4462 REASON_SET(reason, PFRES_FRAG); 4463 return (PF_DROP); 4464 } 4465 4466 /* offset of protocol header that follows h2 */ 4467 off2 = ipoff2 + (h2.ip_hl << 2); 4468 4469 pd2.proto = h2.ip_p; 4470 pd2.src = (struct pf_addr *)&h2.ip_src; 4471 pd2.dst = (struct pf_addr *)&h2.ip_dst; 4472 pd2.ip_sum = &h2.ip_sum; 4473 break; 4474#endif /* INET */ 4475#ifdef INET6 4476 case AF_INET6: 4477 ipoff2 = off + sizeof(struct icmp6_hdr); 4478 4479 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6), 4480 NULL, reason, pd2.af)) { 4481 DPFPRINTF(PF_DEBUG_MISC, 4482 ("pf: ICMP error message too short " 4483 "(ip6)\n")); 4484 return (PF_DROP); 4485 } 4486 pd2.proto = h2_6.ip6_nxt; 4487 pd2.src = (struct pf_addr *)&h2_6.ip6_src; 4488 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst; 4489 pd2.ip_sum = NULL; 4490 off2 = ipoff2 + sizeof(h2_6); 4491 do { 4492 switch (pd2.proto) { 4493 case IPPROTO_FRAGMENT: 4494 /* 4495 * ICMPv6 error messages for 4496 * non-first fragments 4497 */ 4498 REASON_SET(reason, PFRES_FRAG); 4499 return (PF_DROP); 4500 case IPPROTO_AH: 4501 case IPPROTO_HOPOPTS: 4502 case IPPROTO_ROUTING: 4503 case IPPROTO_DSTOPTS: { 4504 /* get next header and header length */ 4505 struct ip6_ext opt6; 4506 4507 if (!pf_pull_hdr(m, off2, &opt6, 4508 sizeof(opt6), NULL, reason, 4509 pd2.af)) { 4510 DPFPRINTF(PF_DEBUG_MISC, 4511 ("pf: ICMPv6 short opt\n")); 4512 return (PF_DROP); 4513 } 4514 if (pd2.proto == IPPROTO_AH) 4515 off2 += (opt6.ip6e_len + 2) * 4; 4516 else 4517 off2 += (opt6.ip6e_len + 1) * 8; 4518 pd2.proto = opt6.ip6e_nxt; 4519 /* goto the next header */ 4520 break; 4521 } 4522 default: 4523 terminal++; 4524 break; 4525 } 4526 } while (!terminal); 4527 break; 4528#endif /* INET6 */ 4529 } 4530 4531 switch (pd2.proto) { 4532 case IPPROTO_TCP: { 4533 struct tcphdr th; 4534 u_int32_t seq; 4535 struct pf_state_peer *src, *dst; 4536 u_int8_t dws; 4537 int copyback = 0; 4538 4539 /* 4540 * Only the first 8 bytes of the TCP header can be 4541 * expected. Don't access any TCP header fields after 4542 * th_seq, an ackskew test is not possible. 
4543 */ 4544 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason, 4545 pd2.af)) { 4546 DPFPRINTF(PF_DEBUG_MISC, 4547 ("pf: ICMP error message too short " 4548 "(tcp)\n")); 4549 return (PF_DROP); 4550 } 4551 4552 key.af = pd2.af; 4553 key.proto = IPPROTO_TCP; 4554 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4555 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4556 key.port[pd2.sidx] = th.th_sport; 4557 key.port[pd2.didx] = th.th_dport; 4558 4559 STATE_LOOKUP(kif, &key, direction, *state, pd); 4560 4561 if (direction == (*state)->direction) { 4562 src = &(*state)->dst; 4563 dst = &(*state)->src; 4564 } else { 4565 src = &(*state)->src; 4566 dst = &(*state)->dst; 4567 } 4568 4569 if (src->wscale && dst->wscale) 4570 dws = dst->wscale & PF_WSCALE_MASK; 4571 else 4572 dws = 0; 4573 4574 /* Demodulate sequence number */ 4575 seq = ntohl(th.th_seq) - src->seqdiff; 4576 if (src->seqdiff) { 4577 pf_change_a(&th.th_seq, icmpsum, 4578 htonl(seq), 0); 4579 copyback = 1; 4580 } 4581 4582 if (!((*state)->state_flags & PFSTATE_SLOPPY) && 4583 (!SEQ_GEQ(src->seqhi, seq) || 4584 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) { 4585 if (V_pf_status.debug >= PF_DEBUG_MISC) { 4586 printf("pf: BAD ICMP %d:%d ", 4587 icmptype, pd->hdr.icmp->icmp_code); 4588 pf_print_host(pd->src, 0, pd->af); 4589 printf(" -> "); 4590 pf_print_host(pd->dst, 0, pd->af); 4591 printf(" state: "); 4592 pf_print_state(*state); 4593 printf(" seq=%u\n", seq); 4594 } 4595 REASON_SET(reason, PFRES_BADSTATE); 4596 return (PF_DROP); 4597 } else { 4598 if (V_pf_status.debug >= PF_DEBUG_MISC) { 4599 printf("pf: OK ICMP %d:%d ", 4600 icmptype, pd->hdr.icmp->icmp_code); 4601 pf_print_host(pd->src, 0, pd->af); 4602 printf(" -> "); 4603 pf_print_host(pd->dst, 0, pd->af); 4604 printf(" state: "); 4605 pf_print_state(*state); 4606 printf(" seq=%u\n", seq); 4607 } 4608 } 4609 4610 /* translate source/destination address, if necessary */ 4611 if ((*state)->key[PF_SK_WIRE] != 4612 (*state)->key[PF_SK_STACK]) { 4613 struct pf_state_key *nk = 4614 (*state)->key[pd->didx]; 4615 4616 if (PF_ANEQ(pd2.src, 4617 &nk->addr[pd2.sidx], pd2.af) || 4618 nk->port[pd2.sidx] != th.th_sport) 4619 pf_change_icmp(pd2.src, &th.th_sport, 4620 daddr, &nk->addr[pd2.sidx], 4621 nk->port[pd2.sidx], NULL, 4622 pd2.ip_sum, icmpsum, 4623 pd->ip_sum, 0, pd2.af); 4624 4625 if (PF_ANEQ(pd2.dst, 4626 &nk->addr[pd2.didx], pd2.af) || 4627 nk->port[pd2.didx] != th.th_dport) 4628 pf_change_icmp(pd2.dst, &th.th_dport, 4629 NULL, /* XXX Inbound NAT? 
*/ 4630 &nk->addr[pd2.didx], 4631 nk->port[pd2.didx], NULL, 4632 pd2.ip_sum, icmpsum, 4633 pd->ip_sum, 0, pd2.af); 4634 copyback = 1; 4635 } 4636 4637 if (copyback) { 4638 switch (pd2.af) { 4639#ifdef INET 4640 case AF_INET: 4641 m_copyback(m, off, ICMP_MINLEN, 4642 (caddr_t )pd->hdr.icmp); 4643 m_copyback(m, ipoff2, sizeof(h2), 4644 (caddr_t )&h2); 4645 break; 4646#endif /* INET */ 4647#ifdef INET6 4648 case AF_INET6: 4649 m_copyback(m, off, 4650 sizeof(struct icmp6_hdr), 4651 (caddr_t )pd->hdr.icmp6); 4652 m_copyback(m, ipoff2, sizeof(h2_6), 4653 (caddr_t )&h2_6); 4654 break; 4655#endif /* INET6 */ 4656 } 4657 m_copyback(m, off2, 8, (caddr_t)&th); 4658 } 4659 4660 return (PF_PASS); 4661 break; 4662 } 4663 case IPPROTO_UDP: { 4664 struct udphdr uh; 4665 4666 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh), 4667 NULL, reason, pd2.af)) { 4668 DPFPRINTF(PF_DEBUG_MISC, 4669 ("pf: ICMP error message too short " 4670 "(udp)\n")); 4671 return (PF_DROP); 4672 } 4673 4674 key.af = pd2.af; 4675 key.proto = IPPROTO_UDP; 4676 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4677 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4678 key.port[pd2.sidx] = uh.uh_sport; 4679 key.port[pd2.didx] = uh.uh_dport; 4680 4681 STATE_LOOKUP(kif, &key, direction, *state, pd); 4682 4683 /* translate source/destination address, if necessary */ 4684 if ((*state)->key[PF_SK_WIRE] != 4685 (*state)->key[PF_SK_STACK]) { 4686 struct pf_state_key *nk = 4687 (*state)->key[pd->didx]; 4688 4689 if (PF_ANEQ(pd2.src, 4690 &nk->addr[pd2.sidx], pd2.af) || 4691 nk->port[pd2.sidx] != uh.uh_sport) 4692 pf_change_icmp(pd2.src, &uh.uh_sport, 4693 daddr, &nk->addr[pd2.sidx], 4694 nk->port[pd2.sidx], &uh.uh_sum, 4695 pd2.ip_sum, icmpsum, 4696 pd->ip_sum, 1, pd2.af); 4697 4698 if (PF_ANEQ(pd2.dst, 4699 &nk->addr[pd2.didx], pd2.af) || 4700 nk->port[pd2.didx] != uh.uh_dport) 4701 pf_change_icmp(pd2.dst, &uh.uh_dport, 4702 NULL, /* XXX Inbound NAT? 
*/ 4703 &nk->addr[pd2.didx], 4704 nk->port[pd2.didx], &uh.uh_sum, 4705 pd2.ip_sum, icmpsum, 4706 pd->ip_sum, 1, pd2.af); 4707 4708 switch (pd2.af) { 4709#ifdef INET 4710 case AF_INET: 4711 m_copyback(m, off, ICMP_MINLEN, 4712 (caddr_t )pd->hdr.icmp); 4713 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4714 break; 4715#endif /* INET */ 4716#ifdef INET6 4717 case AF_INET6: 4718 m_copyback(m, off, 4719 sizeof(struct icmp6_hdr), 4720 (caddr_t )pd->hdr.icmp6); 4721 m_copyback(m, ipoff2, sizeof(h2_6), 4722 (caddr_t )&h2_6); 4723 break; 4724#endif /* INET6 */ 4725 } 4726 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh); 4727 } 4728 return (PF_PASS); 4729 break; 4730 } 4731#ifdef INET 4732 case IPPROTO_ICMP: { 4733 struct icmp iih; 4734 4735 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN, 4736 NULL, reason, pd2.af)) { 4737 DPFPRINTF(PF_DEBUG_MISC, 4738 ("pf: ICMP error message too short " 4739 "(icmp)\n")); 4740 return (PF_DROP); 4741 } 4742 4743 key.af = pd2.af; 4744 key.proto = IPPROTO_ICMP; 4745 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4746 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4747 key.port[0] = key.port[1] = iih.icmp_id; 4748 4749 STATE_LOOKUP(kif, &key, direction, *state, pd); 4750 4751 /* translate source/destination address, if necessary */ 4752 if ((*state)->key[PF_SK_WIRE] != 4753 (*state)->key[PF_SK_STACK]) { 4754 struct pf_state_key *nk = 4755 (*state)->key[pd->didx]; 4756 4757 if (PF_ANEQ(pd2.src, 4758 &nk->addr[pd2.sidx], pd2.af) || 4759 nk->port[pd2.sidx] != iih.icmp_id) 4760 pf_change_icmp(pd2.src, &iih.icmp_id, 4761 daddr, &nk->addr[pd2.sidx], 4762 nk->port[pd2.sidx], NULL, 4763 pd2.ip_sum, icmpsum, 4764 pd->ip_sum, 0, AF_INET); 4765 4766 if (PF_ANEQ(pd2.dst, 4767 &nk->addr[pd2.didx], pd2.af) || 4768 nk->port[pd2.didx] != iih.icmp_id) 4769 pf_change_icmp(pd2.dst, &iih.icmp_id, 4770 NULL, /* XXX Inbound NAT? */ 4771 &nk->addr[pd2.didx], 4772 nk->port[pd2.didx], NULL, 4773 pd2.ip_sum, icmpsum, 4774 pd->ip_sum, 0, AF_INET); 4775 4776 m_copyback(m, off, ICMP_MINLEN, (caddr_t)pd->hdr.icmp); 4777 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4778 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih); 4779 } 4780 return (PF_PASS); 4781 break; 4782 } 4783#endif /* INET */ 4784#ifdef INET6 4785 case IPPROTO_ICMPV6: { 4786 struct icmp6_hdr iih; 4787 4788 if (!pf_pull_hdr(m, off2, &iih, 4789 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { 4790 DPFPRINTF(PF_DEBUG_MISC, 4791 ("pf: ICMP error message too short " 4792 "(icmp6)\n")); 4793 return (PF_DROP); 4794 } 4795 4796 key.af = pd2.af; 4797 key.proto = IPPROTO_ICMPV6; 4798 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4799 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4800 key.port[0] = key.port[1] = iih.icmp6_id; 4801 4802 STATE_LOOKUP(kif, &key, direction, *state, pd); 4803 4804 /* translate source/destination address, if necessary */ 4805 if ((*state)->key[PF_SK_WIRE] != 4806 (*state)->key[PF_SK_STACK]) { 4807 struct pf_state_key *nk = 4808 (*state)->key[pd->didx]; 4809 4810 if (PF_ANEQ(pd2.src, 4811 &nk->addr[pd2.sidx], pd2.af) || 4812 nk->port[pd2.sidx] != iih.icmp6_id) 4813 pf_change_icmp(pd2.src, &iih.icmp6_id, 4814 daddr, &nk->addr[pd2.sidx], 4815 nk->port[pd2.sidx], NULL, 4816 pd2.ip_sum, icmpsum, 4817 pd->ip_sum, 0, AF_INET6); 4818 4819 if (PF_ANEQ(pd2.dst, 4820 &nk->addr[pd2.didx], pd2.af) || 4821 nk->port[pd2.didx] != iih.icmp6_id) 4822 pf_change_icmp(pd2.dst, &iih.icmp6_id, 4823 NULL, /* XXX Inbound NAT? 
*/ 4824 &nk->addr[pd2.didx], 4825 nk->port[pd2.didx], NULL, 4826 pd2.ip_sum, icmpsum, 4827 pd->ip_sum, 0, AF_INET6); 4828 4829 m_copyback(m, off, sizeof(struct icmp6_hdr), 4830 (caddr_t)pd->hdr.icmp6); 4831 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6); 4832 m_copyback(m, off2, sizeof(struct icmp6_hdr), 4833 (caddr_t)&iih); 4834 } 4835 return (PF_PASS); 4836 break; 4837 } 4838#endif /* INET6 */ 4839 default: { 4840 key.af = pd2.af; 4841 key.proto = pd2.proto; 4842 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af); 4843 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af); 4844 key.port[0] = key.port[1] = 0; 4845 4846 STATE_LOOKUP(kif, &key, direction, *state, pd); 4847 4848 /* translate source/destination address, if necessary */ 4849 if ((*state)->key[PF_SK_WIRE] != 4850 (*state)->key[PF_SK_STACK]) { 4851 struct pf_state_key *nk = 4852 (*state)->key[pd->didx]; 4853 4854 if (PF_ANEQ(pd2.src, 4855 &nk->addr[pd2.sidx], pd2.af)) 4856 pf_change_icmp(pd2.src, NULL, daddr, 4857 &nk->addr[pd2.sidx], 0, NULL, 4858 pd2.ip_sum, icmpsum, 4859 pd->ip_sum, 0, pd2.af); 4860 4861 if (PF_ANEQ(pd2.dst, 4862 &nk->addr[pd2.didx], pd2.af)) 4863 pf_change_icmp(pd2.dst, NULL, 4864 NULL, /* XXX Inbound NAT? */ 4865 &nk->addr[pd2.didx], 0, NULL, 4866 pd2.ip_sum, icmpsum, 4867 pd->ip_sum, 0, pd2.af); 4868 4869 switch (pd2.af) { 4870#ifdef INET 4871 case AF_INET: 4872 m_copyback(m, off, ICMP_MINLEN, 4873 (caddr_t)pd->hdr.icmp); 4874 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2); 4875 break; 4876#endif /* INET */ 4877#ifdef INET6 4878 case AF_INET6: 4879 m_copyback(m, off, 4880 sizeof(struct icmp6_hdr), 4881 (caddr_t )pd->hdr.icmp6); 4882 m_copyback(m, ipoff2, sizeof(h2_6), 4883 (caddr_t )&h2_6); 4884 break; 4885#endif /* INET6 */ 4886 } 4887 } 4888 return (PF_PASS); 4889 break; 4890 } 4891 } 4892 } 4893} 4894 4895static int 4896pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, 4897 struct mbuf *m, struct pf_pdesc *pd) 4898{ 4899 struct pf_state_peer *src, *dst; 4900 struct pf_state_key_cmp key; 4901 4902 bzero(&key, sizeof(key)); 4903 key.af = pd->af; 4904 key.proto = pd->proto; 4905 if (direction == PF_IN) { 4906 PF_ACPY(&key.addr[0], pd->src, key.af); 4907 PF_ACPY(&key.addr[1], pd->dst, key.af); 4908 key.port[0] = key.port[1] = 0; 4909 } else { 4910 PF_ACPY(&key.addr[1], pd->src, key.af); 4911 PF_ACPY(&key.addr[0], pd->dst, key.af); 4912 key.port[1] = key.port[0] = 0; 4913 } 4914 4915 STATE_LOOKUP(kif, &key, direction, *state, pd); 4916 4917 if (direction == (*state)->direction) { 4918 src = &(*state)->src; 4919 dst = &(*state)->dst; 4920 } else { 4921 src = &(*state)->dst; 4922 dst = &(*state)->src; 4923 } 4924 4925 /* update states */ 4926 if (src->state < PFOTHERS_SINGLE) 4927 src->state = PFOTHERS_SINGLE; 4928 if (dst->state == PFOTHERS_SINGLE) 4929 dst->state = PFOTHERS_MULTIPLE; 4930 4931 /* update expire time */ 4932 (*state)->expire = time_uptime; 4933 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) 4934 (*state)->timeout = PFTM_OTHER_MULTIPLE; 4935 else 4936 (*state)->timeout = PFTM_OTHER_SINGLE; 4937 4938 /* translate source/destination address, if necessary */ 4939 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) { 4940 struct pf_state_key *nk = (*state)->key[pd->didx]; 4941 4942 KASSERT(nk, ("%s: nk is null", __func__)); 4943 KASSERT(pd, ("%s: pd is null", __func__)); 4944 KASSERT(pd->src, ("%s: pd->src is null", __func__)); 4945 KASSERT(pd->dst, ("%s: pd->dst is null", __func__)); 4946 switch (pd->af) { 4947#ifdef INET 4948 case AF_INET: 
4949 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET)) 4950 pf_change_a(&pd->src->v4.s_addr, 4951 pd->ip_sum, 4952 nk->addr[pd->sidx].v4.s_addr, 4953 0); 4954 4955 4956 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET)) 4957 pf_change_a(&pd->dst->v4.s_addr, 4958 pd->ip_sum, 4959 nk->addr[pd->didx].v4.s_addr, 4960 0); 4961 4962 break; 4963#endif /* INET */ 4964#ifdef INET6 4965 case AF_INET6: 4966 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6)) 4967 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af); 4968 4969 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6)) 4970 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af); 4971#endif /* INET6 */ 4972 } 4973 } 4974 return (PF_PASS); 4975} 4976 4977/* 4978 * ipoff and off are measured from the start of the mbuf chain. 4979 * h must be at "ipoff" on the mbuf chain. 4980 */ 4981void * 4982pf_pull_hdr(struct mbuf *m, int off, void *p, int len, 4983 u_short *actionp, u_short *reasonp, sa_family_t af) 4984{ 4985 switch (af) { 4986#ifdef INET 4987 case AF_INET: { 4988 struct ip *h = mtod(m, struct ip *); 4989 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; 4990 4991 if (fragoff) { 4992 if (fragoff >= len) 4993 ACTION_SET(actionp, PF_PASS); 4994 else { 4995 ACTION_SET(actionp, PF_DROP); 4996 REASON_SET(reasonp, PFRES_FRAG); 4997 } 4998 return (NULL); 4999 } 5000 if (m->m_pkthdr.len < off + len || 5001 ntohs(h->ip_len) < off + len) { 5002 ACTION_SET(actionp, PF_DROP); 5003 REASON_SET(reasonp, PFRES_SHORT); 5004 return (NULL); 5005 } 5006 break; 5007 } 5008#endif /* INET */ 5009#ifdef INET6 5010 case AF_INET6: { 5011 struct ip6_hdr *h = mtod(m, struct ip6_hdr *); 5012 5013 if (m->m_pkthdr.len < off + len || 5014 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < 5015 (unsigned)(off + len)) { 5016 ACTION_SET(actionp, PF_DROP); 5017 REASON_SET(reasonp, PFRES_SHORT); 5018 return (NULL); 5019 } 5020 break; 5021 } 5022#endif /* INET6 */ 5023 } 5024 m_copydata(m, off, len, p); 5025 return (p); 5026} 5027 5028int 5029pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif, 5030 int rtableid) 5031{ 5032#ifdef RADIX_MPATH 5033 struct radix_node_head *rnh; 5034#endif 5035 struct sockaddr_in *dst; 5036 int ret = 1; 5037 int check_mpath; 5038#ifdef INET6 5039 struct sockaddr_in6 *dst6; 5040 struct route_in6 ro; 5041#else 5042 struct route ro; 5043#endif 5044 struct radix_node *rn; 5045 struct rtentry *rt; 5046 struct ifnet *ifp; 5047 5048 check_mpath = 0; 5049#ifdef RADIX_MPATH 5050 /* XXX: stick to table 0 for now */ 5051 rnh = rt_tables_get_rnh(0, af); 5052 if (rnh != NULL && rn_mpath_capable(rnh)) 5053 check_mpath = 1; 5054#endif 5055 bzero(&ro, sizeof(ro)); 5056 switch (af) { 5057 case AF_INET: 5058 dst = satosin(&ro.ro_dst); 5059 dst->sin_family = AF_INET; 5060 dst->sin_len = sizeof(*dst); 5061 dst->sin_addr = addr->v4; 5062 break; 5063#ifdef INET6 5064 case AF_INET6: 5065 /* 5066 * Skip check for addresses with embedded interface scope, 5067 * as they would always match anyway. 
5068 */ 5069 if (IN6_IS_SCOPE_EMBED(&addr->v6)) 5070 goto out; 5071 dst6 = (struct sockaddr_in6 *)&ro.ro_dst; 5072 dst6->sin6_family = AF_INET6; 5073 dst6->sin6_len = sizeof(*dst6); 5074 dst6->sin6_addr = addr->v6; 5075 break; 5076#endif /* INET6 */ 5077 default: 5078 return (0); 5079 } 5080 5081 /* Skip checks for ipsec interfaces */ 5082 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) 5083 goto out; 5084 5085 switch (af) { 5086#ifdef INET6 5087 case AF_INET6: 5088 in6_rtalloc_ign(&ro, 0, rtableid); 5089 break; 5090#endif 5091#ifdef INET 5092 case AF_INET: 5093 in_rtalloc_ign((struct route *)&ro, 0, rtableid); 5094 break; 5095#endif 5096 default: 5097 rtalloc_ign((struct route *)&ro, 0); /* No/default FIB. */ 5098 break; 5099 } 5100 5101 if (ro.ro_rt != NULL) { 5102 /* No interface given, this is a no-route check */ 5103 if (kif == NULL) 5104 goto out; 5105 5106 if (kif->pfik_ifp == NULL) { 5107 ret = 0; 5108 goto out; 5109 } 5110 5111 /* Perform uRPF check if passed input interface */ 5112 ret = 0; 5113 rn = (struct radix_node *)ro.ro_rt; 5114 do { 5115 rt = (struct rtentry *)rn; 5116 ifp = rt->rt_ifp; 5117 5118 if (kif->pfik_ifp == ifp) 5119 ret = 1; 5120#ifdef RADIX_MPATH 5121 rn = rn_mpath_next(rn); 5122#endif 5123 } while (check_mpath == 1 && rn != NULL && ret == 0); 5124 } else 5125 ret = 0; 5126out: 5127 if (ro.ro_rt != NULL) 5128 RTFREE(ro.ro_rt); 5129 return (ret); 5130} 5131 5132#ifdef INET 5133static void 5134pf_route(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5135 struct pf_state *s, struct pf_pdesc *pd) 5136{ 5137 struct mbuf *m0, *m1; 5138 struct sockaddr_in dst; 5139 struct ip *ip; 5140 struct ifnet *ifp = NULL; 5141 struct pf_addr naddr; 5142 struct pf_src_node *sn = NULL; 5143 int error = 0; 5144 uint16_t ip_len, ip_off; 5145 5146 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); 5147 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction", 5148 __func__)); 5149 5150 if ((pd->pf_mtag == NULL && 5151 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || 5152 pd->pf_mtag->routed++ > 3) { 5153 m0 = *m; 5154 *m = NULL; 5155 goto bad_locked; 5156 } 5157 5158 if (r->rt == PF_DUPTO) { 5159 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) { 5160 if (s) 5161 PF_STATE_UNLOCK(s); 5162 return; 5163 } 5164 } else { 5165 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5166 if (s) 5167 PF_STATE_UNLOCK(s); 5168 return; 5169 } 5170 m0 = *m; 5171 } 5172 5173 ip = mtod(m0, struct ip *); 5174 5175 bzero(&dst, sizeof(dst)); 5176 dst.sin_family = AF_INET; 5177 dst.sin_len = sizeof(dst); 5178 dst.sin_addr = ip->ip_dst; 5179 5180 if (r->rt == PF_FASTROUTE) { 5181 struct rtentry *rt; 5182 5183 if (s) 5184 PF_STATE_UNLOCK(s); 5185 rt = rtalloc1_fib(sintosa(&dst), 0, 0, M_GETFIB(m0)); 5186 if (rt == NULL) { 5187 5188 KMOD_IPSTAT_INC(ips_noroute); 5189 error = EHOSTUNREACH; 5190 goto bad; 5191 } 5192 5193 ifp = rt->rt_ifp; 5194 rt->rt_rmx.rmx_pksent++; 5195 5196 if (rt->rt_flags & RTF_GATEWAY) 5197 bcopy(satosin(rt->rt_gateway), &dst, sizeof(dst)); 5198 RTFREE_LOCKED(rt); 5199 } else { 5200 if (TAILQ_EMPTY(&r->rpool.list)) { 5201 DPFPRINTF(PF_DEBUG_URGENT, 5202 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__)); 5203 goto bad_locked; 5204 } 5205 if (s == NULL) { 5206 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, 5207 &naddr, NULL, &sn); 5208 if (!PF_AZERO(&naddr, AF_INET)) 5209 dst.sin_addr.s_addr = naddr.v4.s_addr; 5210 ifp = r->rpool.cur->kif ? 
5211 r->rpool.cur->kif->pfik_ifp : NULL; 5212 } else { 5213 if (!PF_AZERO(&s->rt_addr, AF_INET)) 5214 dst.sin_addr.s_addr = 5215 s->rt_addr.v4.s_addr; 5216 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; 5217 PF_STATE_UNLOCK(s); 5218 } 5219 } 5220 if (ifp == NULL) 5221 goto bad; 5222 5223 if (oifp != ifp) { 5224 if (pf_test(PF_OUT, ifp, &m0, NULL) != PF_PASS) 5225 goto bad; 5226 else if (m0 == NULL) 5227 goto done; 5228 if (m0->m_len < sizeof(struct ip)) { 5229 DPFPRINTF(PF_DEBUG_URGENT, 5230 ("%s: m0->m_len < sizeof(struct ip)\n", __func__)); 5231 goto bad; 5232 } 5233 ip = mtod(m0, struct ip *); 5234 } 5235 5236 if (ifp->if_flags & IFF_LOOPBACK) 5237 m0->m_flags |= M_SKIP_FIREWALL; 5238 5239 ip_len = ntohs(ip->ip_len); 5240 ip_off = ntohs(ip->ip_off); 5241 5242 /* Copied from FreeBSD 10.0-CURRENT ip_output. */ 5243 m0->m_pkthdr.csum_flags |= CSUM_IP; 5244 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) { 5245 in_delayed_cksum(m0); 5246 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA; 5247 } 5248#ifdef SCTP 5249 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) { 5250 sctp_delayed_cksum(m, (uint32_t)(ip->ip_hl << 2)); 5251 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP; 5252 } 5253#endif 5254 5255 /* 5256 * If small enough for interface, or the interface will take 5257 * care of the fragmentation for us, we can just send directly. 5258 */ 5259 if (ip_len <= ifp->if_mtu || 5260 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0 || 5261 ((ip_off & IP_DF) == 0 && (ifp->if_hwassist & CSUM_FRAGMENT))) { 5262 ip->ip_sum = 0; 5263 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) { 5264 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2); 5265 m0->m_pkthdr.csum_flags &= ~CSUM_IP; 5266 } 5267 m0->m_flags &= ~(M_PROTOFLAGS); 5268 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL); 5269 goto done; 5270 } 5271 5272 /* Balk when DF bit is set or the interface didn't support TSO. 
*/ 5273 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { 5274 error = EMSGSIZE; 5275 KMOD_IPSTAT_INC(ips_cantfrag); 5276 if (r->rt != PF_DUPTO) { 5277 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, 5278 ifp->if_mtu); 5279 goto done; 5280 } else 5281 goto bad; 5282 } 5283 5284 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist); 5285 if (error) 5286 goto bad; 5287 5288 for (; m0; m0 = m1) { 5289 m1 = m0->m_nextpkt; 5290 m0->m_nextpkt = NULL; 5291 if (error == 0) { 5292 m0->m_flags &= ~(M_PROTOFLAGS); 5293 error = (*ifp->if_output)(ifp, m0, sintosa(&dst), NULL); 5294 } else 5295 m_freem(m0); 5296 } 5297 5298 if (error == 0) 5299 KMOD_IPSTAT_INC(ips_fragmented); 5300 5301done: 5302 if (r->rt != PF_DUPTO) 5303 *m = NULL; 5304 return; 5305 5306bad_locked: 5307 if (s) 5308 PF_STATE_UNLOCK(s); 5309bad: 5310 m_freem(m0); 5311 goto done; 5312} 5313#endif /* INET */ 5314 5315#ifdef INET6 5316static void 5317pf_route6(struct mbuf **m, struct pf_rule *r, int dir, struct ifnet *oifp, 5318 struct pf_state *s, struct pf_pdesc *pd) 5319{ 5320 struct mbuf *m0; 5321 struct sockaddr_in6 dst; 5322 struct ip6_hdr *ip6; 5323 struct ifnet *ifp = NULL; 5324 struct pf_addr naddr; 5325 struct pf_src_node *sn = NULL; 5326 5327 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__)); 5328 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction", 5329 __func__)); 5330 5331 if ((pd->pf_mtag == NULL && 5332 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) || 5333 pd->pf_mtag->routed++ > 3) { 5334 m0 = *m; 5335 *m = NULL; 5336 goto bad_locked; 5337 } 5338 5339 if (r->rt == PF_DUPTO) { 5340 if ((m0 = m_dup(*m, M_NOWAIT)) == NULL) { 5341 if (s) 5342 PF_STATE_UNLOCK(s); 5343 return; 5344 } 5345 } else { 5346 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { 5347 if (s) 5348 PF_STATE_UNLOCK(s); 5349 return; 5350 } 5351 m0 = *m; 5352 } 5353 5354 ip6 = mtod(m0, struct ip6_hdr *); 5355 5356 bzero(&dst, sizeof(dst)); 5357 dst.sin6_family = AF_INET6; 5358 dst.sin6_len = sizeof(dst); 5359 dst.sin6_addr = ip6->ip6_dst; 5360 5361 /* Cheat. XXX why only in the v6 case??? */ 5362 if (r->rt == PF_FASTROUTE) { 5363 if (s) 5364 PF_STATE_UNLOCK(s); 5365 m0->m_flags |= M_SKIP_FIREWALL; 5366 ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); 5367 return; 5368 } 5369 5370 if (TAILQ_EMPTY(&r->rpool.list)) { 5371 DPFPRINTF(PF_DEBUG_URGENT, 5372 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__)); 5373 goto bad_locked; 5374 } 5375 if (s == NULL) { 5376 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src, 5377 &naddr, NULL, &sn); 5378 if (!PF_AZERO(&naddr, AF_INET6)) 5379 PF_ACPY((struct pf_addr *)&dst.sin6_addr, 5380 &naddr, AF_INET6); 5381 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; 5382 } else { 5383 if (!PF_AZERO(&s->rt_addr, AF_INET6)) 5384 PF_ACPY((struct pf_addr *)&dst.sin6_addr, 5385 &s->rt_addr, AF_INET6); 5386 ifp = s->rt_kif ? 
s->rt_kif->pfik_ifp : NULL; 5387 } 5388 5389 if (s) 5390 PF_STATE_UNLOCK(s); 5391 5392 if (ifp == NULL) 5393 goto bad; 5394 5395 if (oifp != ifp) { 5396 if (pf_test6(PF_OUT, ifp, &m0, NULL) != PF_PASS) 5397 goto bad; 5398 else if (m0 == NULL) 5399 goto done; 5400 if (m0->m_len < sizeof(struct ip6_hdr)) { 5401 DPFPRINTF(PF_DEBUG_URGENT, 5402 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n", 5403 __func__)); 5404 goto bad; 5405 } 5406 ip6 = mtod(m0, struct ip6_hdr *); 5407 } 5408 5409 if (ifp->if_flags & IFF_LOOPBACK) 5410 m0->m_flags |= M_SKIP_FIREWALL; 5411 5412 /* 5413 * If the packet is too large for the outgoing interface, 5414 * send back an icmp6 error. 5415 */ 5416 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr)) 5417 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index); 5418 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) 5419 nd6_output(ifp, ifp, m0, &dst, NULL); 5420 else { 5421 in6_ifstat_inc(ifp, ifs6_in_toobig); 5422 if (r->rt != PF_DUPTO) 5423 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); 5424 else 5425 goto bad; 5426 } 5427 5428done: 5429 if (r->rt != PF_DUPTO) 5430 *m = NULL; 5431 return; 5432 5433bad_locked: 5434 if (s) 5435 PF_STATE_UNLOCK(s); 5436bad: 5437 m_freem(m0); 5438 goto done; 5439} 5440#endif /* INET6 */ 5441 5442/* 5443 * FreeBSD supports cksum offloads for the following drivers. 5444 * em(4), fxp(4), ixgb(4), lge(4), ndis(4), nge(4), re(4), 5445 * ti(4), txp(4), xl(4) 5446 * 5447 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR : 5448 * network driver performed cksum including pseudo header, need to verify 5449 * csum_data 5450 * CSUM_DATA_VALID : 5451 * network driver performed cksum, needs additional pseudo header 5452 * cksum computation with partial csum_data (i.e., lack of H/W support for 5453 * pseudo header, for instance hme(4), sk(4) and possibly gem(4)) 5454 * 5455 * After validating the cksum of the packet, set both flags CSUM_DATA_VALID and 5456 * CSUM_PSEUDO_HDR in order to avoid recomputation of the cksum in upper 5457 * TCP/UDP layer. 5458 * Also, set csum_data to 0xffff to force cksum validation. 
5459 */ 5460static int 5461pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af) 5462{ 5463 u_int16_t sum = 0; 5464 int hw_assist = 0; 5465 struct ip *ip; 5466 5467 if (off < sizeof(struct ip) || len < sizeof(struct udphdr)) 5468 return (1); 5469 if (m->m_pkthdr.len < off + len) 5470 return (1); 5471 5472 switch (p) { 5473 case IPPROTO_TCP: 5474 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5475 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5476 sum = m->m_pkthdr.csum_data; 5477 } else { 5478 ip = mtod(m, struct ip *); 5479 sum = in_pseudo(ip->ip_src.s_addr, 5480 ip->ip_dst.s_addr, htonl((u_short)len + 5481 m->m_pkthdr.csum_data + IPPROTO_TCP)); 5482 } 5483 sum ^= 0xffff; 5484 ++hw_assist; 5485 } 5486 break; 5487 case IPPROTO_UDP: 5488 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 5489 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) { 5490 sum = m->m_pkthdr.csum_data; 5491 } else { 5492 ip = mtod(m, struct ip *); 5493 sum = in_pseudo(ip->ip_src.s_addr, 5494 ip->ip_dst.s_addr, htonl((u_short)len + 5495 m->m_pkthdr.csum_data + IPPROTO_UDP)); 5496 } 5497 sum ^= 0xffff; 5498 ++hw_assist; 5499 } 5500 break; 5501 case IPPROTO_ICMP: 5502#ifdef INET6 5503 case IPPROTO_ICMPV6: 5504#endif /* INET6 */ 5505 break; 5506 default: 5507 return (1); 5508 } 5509 5510 if (!hw_assist) { 5511 switch (af) { 5512 case AF_INET: 5513 if (p == IPPROTO_ICMP) { 5514 if (m->m_len < off) 5515 return (1); 5516 m->m_data += off; 5517 m->m_len -= off; 5518 sum = in_cksum(m, len); 5519 m->m_data -= off; 5520 m->m_len += off; 5521 } else { 5522 if (m->m_len < sizeof(struct ip)) 5523 return (1); 5524 sum = in4_cksum(m, p, off, len); 5525 } 5526 break; 5527#ifdef INET6 5528 case AF_INET6: 5529 if (m->m_len < sizeof(struct ip6_hdr)) 5530 return (1); 5531 sum = in6_cksum(m, p, off, len); 5532 break; 5533#endif /* INET6 */ 5534 default: 5535 return (1); 5536 } 5537 } 5538 if (sum) { 5539 switch (p) { 5540 case IPPROTO_TCP: 5541 { 5542 KMOD_TCPSTAT_INC(tcps_rcvbadsum); 5543 break; 5544 } 5545 case IPPROTO_UDP: 5546 { 5547 KMOD_UDPSTAT_INC(udps_badsum); 5548 break; 5549 } 5550#ifdef INET 5551 case IPPROTO_ICMP: 5552 { 5553 KMOD_ICMPSTAT_INC(icps_checksum); 5554 break; 5555 } 5556#endif 5557#ifdef INET6 5558 case IPPROTO_ICMPV6: 5559 { 5560 KMOD_ICMP6STAT_INC(icp6s_checksum); 5561 break; 5562 } 5563#endif /* INET6 */ 5564 } 5565 return (1); 5566 } else { 5567 if (p == IPPROTO_TCP || p == IPPROTO_UDP) { 5568 m->m_pkthdr.csum_flags |= 5569 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 5570 m->m_pkthdr.csum_data = 0xffff; 5571 } 5572 } 5573 return (0); 5574} 5575 5576 5577#ifdef INET 5578int 5579pf_test(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) 5580{ 5581 struct pfi_kif *kif; 5582 u_short action, reason = 0, log = 0; 5583 struct mbuf *m = *m0; 5584 struct ip *h = NULL; 5585 struct m_tag *ipfwtag; 5586 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr; 5587 struct pf_state *s = NULL; 5588 struct pf_ruleset *ruleset = NULL; 5589 struct pf_pdesc pd; 5590 int off, dirndx, pqid = 0; 5591 5592 M_ASSERTPKTHDR(m); 5593 5594 if (!V_pf_status.running) 5595 return (PF_PASS); 5596 5597 memset(&pd, 0, sizeof(pd)); 5598 5599 kif = (struct pfi_kif *)ifp->if_pf_kif; 5600 5601 if (kif == NULL) { 5602 DPFPRINTF(PF_DEBUG_URGENT, 5603 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname)); 5604 return (PF_DROP); 5605 } 5606 if (kif->pfik_flags & PFI_IFLAG_SKIP) 5607 return (PF_PASS); 5608 5609 if (m->m_flags & M_SKIP_FIREWALL) 5610 return (PF_PASS); 5611 5612 pd.pf_mtag = pf_find_mtag(m); 5613 
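/*
 * Editor's note: pf_test() is not called directly by the IP stack; in this
 * era it is invoked through a pfil(9) hook (see pf_check_in()/pf_check_out()
 * in pf_ioctl.c).  A sketch of such a wrapper follows, assuming the pfil
 * hook signature of this FreeBSD generation; the function name here is
 * made up for illustration:
 */
#if 0	/* illustrative sketch only, not part of the original source */
static int
example_check_in(void *arg, struct mbuf **m, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{
	int chk;

	/* pf_test() may pass, drop, or consume the mbuf via *m. */
	chk = pf_test(PF_IN, ifp, m, inp);
	if (chk && *m) {
		m_freem(*m);
		*m = NULL;
	}
	return (chk);
}
#endif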
5614 PF_RULES_RLOCK(); 5615 5616 if (ip_divert_ptr != NULL && 5617 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) { 5618 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1); 5619 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) { 5620 if (pd.pf_mtag == NULL && 5621 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { 5622 action = PF_DROP; 5623 goto done; 5624 } 5625 pd.pf_mtag->flags |= PF_PACKET_LOOPED; 5626 m_tag_delete(m, ipfwtag); 5627 } 5628 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) { 5629 m->m_flags |= M_FASTFWD_OURS; 5630 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT; 5631 } 5632 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) { 5633 /* We do IP header normalization and packet reassembly here */ 5634 action = PF_DROP; 5635 goto done; 5636 } 5637 m = *m0; /* pf_normalize messes with m0 */ 5638 h = mtod(m, struct ip *); 5639 5640 off = h->ip_hl << 2; 5641 if (off < (int)sizeof(struct ip)) { 5642 action = PF_DROP; 5643 REASON_SET(&reason, PFRES_SHORT); 5644 log = 1; 5645 goto done; 5646 } 5647 5648 pd.src = (struct pf_addr *)&h->ip_src; 5649 pd.dst = (struct pf_addr *)&h->ip_dst; 5650 pd.sport = pd.dport = NULL; 5651 pd.ip_sum = &h->ip_sum; 5652 pd.proto_sum = NULL; 5653 pd.proto = h->ip_p; 5654 pd.dir = dir; 5655 pd.sidx = (dir == PF_IN) ? 0 : 1; 5656 pd.didx = (dir == PF_IN) ? 1 : 0; 5657 pd.af = AF_INET; 5658 pd.tos = h->ip_tos; 5659 pd.tot_len = ntohs(h->ip_len); 5660 5661 /* handle fragments that didn't get reassembled by normalization */ 5662 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) { 5663 action = pf_test_fragment(&r, dir, kif, m, h, 5664 &pd, &a, &ruleset); 5665 goto done; 5666 } 5667 5668 switch (h->ip_p) { 5669 5670 case IPPROTO_TCP: { 5671 struct tcphdr th; 5672 5673 pd.hdr.tcp = &th; 5674 if (!pf_pull_hdr(m, off, &th, sizeof(th), 5675 &action, &reason, AF_INET)) { 5676 log = action != PF_PASS; 5677 goto done; 5678 } 5679 pd.p_len = pd.tot_len - off - (th.th_off << 2); 5680 if ((th.th_flags & TH_ACK) && pd.p_len == 0) 5681 pqid = 1; 5682 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 5683 if (action == PF_DROP) 5684 goto done; 5685 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 5686 &reason); 5687 if (action == PF_PASS) { 5688 if (pfsync_update_state_ptr != NULL) 5689 pfsync_update_state_ptr(s); 5690 r = s->rule.ptr; 5691 a = s->anchor.ptr; 5692 log = s->log; 5693 } else if (s == NULL) 5694 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5695 &a, &ruleset, inp); 5696 break; 5697 } 5698 5699 case IPPROTO_UDP: { 5700 struct udphdr uh; 5701 5702 pd.hdr.udp = &uh; 5703 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 5704 &action, &reason, AF_INET)) { 5705 log = action != PF_PASS; 5706 goto done; 5707 } 5708 if (uh.uh_dport == 0 || 5709 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 5710 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 5711 action = PF_DROP; 5712 REASON_SET(&reason, PFRES_SHORT); 5713 goto done; 5714 } 5715 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 5716 if (action == PF_PASS) { 5717 if (pfsync_update_state_ptr != NULL) 5718 pfsync_update_state_ptr(s); 5719 r = s->rule.ptr; 5720 a = s->anchor.ptr; 5721 log = s->log; 5722 } else if (s == NULL) 5723 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5724 &a, &ruleset, inp); 5725 break; 5726 } 5727 5728 case IPPROTO_ICMP: { 5729 struct icmp ih; 5730 5731 pd.hdr.icmp = &ih; 5732 if (!pf_pull_hdr(m, off, &ih, ICMP_MINLEN, 5733 &action, &reason, AF_INET)) { 5734 log = action != PF_PASS; 5735 goto done; 5736 } 5737 action = 
pf_test_state_icmp(&s, dir, kif, m, off, h, &pd, 5738 &reason); 5739 if (action == PF_PASS) { 5740 if (pfsync_update_state_ptr != NULL) 5741 pfsync_update_state_ptr(s); 5742 r = s->rule.ptr; 5743 a = s->anchor.ptr; 5744 log = s->log; 5745 } else if (s == NULL) 5746 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5747 &a, &ruleset, inp); 5748 break; 5749 } 5750 5751#ifdef INET6 5752 case IPPROTO_ICMPV6: { 5753 action = PF_DROP; 5754 DPFPRINTF(PF_DEBUG_MISC, 5755 ("pf: dropping IPv4 packet with ICMPv6 payload\n")); 5756 goto done; 5757 } 5758#endif 5759 5760 default: 5761 action = pf_test_state_other(&s, dir, kif, m, &pd); 5762 if (action == PF_PASS) { 5763 if (pfsync_update_state_ptr != NULL) 5764 pfsync_update_state_ptr(s); 5765 r = s->rule.ptr; 5766 a = s->anchor.ptr; 5767 log = s->log; 5768 } else if (s == NULL) 5769 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 5770 &a, &ruleset, inp); 5771 break; 5772 } 5773 5774done: 5775 PF_RULES_RUNLOCK(); 5776 if (action == PF_PASS && h->ip_hl > 5 && 5777 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 5778 action = PF_DROP; 5779 REASON_SET(&reason, PFRES_IPOPTIONS); 5780 log = 1; 5781 DPFPRINTF(PF_DEBUG_MISC, 5782 ("pf: dropping packet with ip options\n")); 5783 } 5784 5785 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) { 5786 action = PF_DROP; 5787 REASON_SET(&reason, PFRES_MEMORY); 5788 } 5789 if (r->rtableid >= 0) 5790 M_SETFIB(m, r->rtableid); 5791 5792#ifdef ALTQ 5793 if (action == PF_PASS && r->qid) { 5794 if (pd.pf_mtag == NULL && 5795 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { 5796 action = PF_DROP; 5797 REASON_SET(&reason, PFRES_MEMORY); 5798 } else { 5799 if (pqid || (pd.tos & IPTOS_LOWDELAY)) 5800 pd.pf_mtag->qid = r->pqid; 5801 else 5802 pd.pf_mtag->qid = r->qid; 5803 /* add hints for ecn */ 5804 pd.pf_mtag->hdr = h; 5805 } 5806 } 5807#endif /* ALTQ */ 5808 5809 /* 5810 * connections redirected to loopback should not match sockets 5811 * bound specifically to loopback due to security implications, 5812 * see tcp_input() and in_pcblookup_listen(). 5813 */ 5814 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 5815 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 5816 (s->nat_rule.ptr->action == PF_RDR || 5817 s->nat_rule.ptr->action == PF_BINAT) && 5818 (ntohl(pd.dst->v4.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) 5819 m->m_flags |= M_SKIP_FIREWALL; 5820 5821 if (action == PF_PASS && r->divert.port && ip_divert_ptr != NULL && 5822 !PACKET_LOOPED(&pd)) { 5823 5824 ipfwtag = m_tag_alloc(MTAG_IPFW_RULE, 0, 5825 sizeof(struct ipfw_rule_ref), M_NOWAIT | M_ZERO); 5826 if (ipfwtag != NULL) { 5827 ((struct ipfw_rule_ref *)(ipfwtag+1))->info = 5828 ntohs(r->divert.port); 5829 ((struct ipfw_rule_ref *)(ipfwtag+1))->rulenum = dir; 5830 5831 if (s) 5832 PF_STATE_UNLOCK(s); 5833 5834 m_tag_prepend(m, ipfwtag); 5835 if (m->m_flags & M_FASTFWD_OURS) { 5836 if (pd.pf_mtag == NULL && 5837 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { 5838 action = PF_DROP; 5839 REASON_SET(&reason, PFRES_MEMORY); 5840 log = 1; 5841 DPFPRINTF(PF_DEBUG_MISC, 5842 ("pf: failed to allocate tag\n")); 5843 } else { 5844 pd.pf_mtag->flags |= PF_FASTFWD_OURS_PRESENT; 5845 m->m_flags &= ~M_FASTFWD_OURS; 5846 } } 5847 ip_divert_ptr(*m0, dir == PF_IN ? DIR_IN : DIR_OUT); 5848 *m0 = NULL; 5849 5850 return (action); 5851 } else { 5852 /* XXX: ipfw has the same behaviour! 
*/ 5853 action = PF_DROP; 5854 REASON_SET(&reason, PFRES_MEMORY); 5855 log = 1; 5856 DPFPRINTF(PF_DEBUG_MISC, 5857 ("pf: failed to allocate divert tag\n")); 5858 } 5859 } 5860 5861 if (log) { 5862 struct pf_rule *lr; 5863 5864 if (s != NULL && s->nat_rule.ptr != NULL && 5865 s->nat_rule.ptr->log & PF_LOG_ALL) 5866 lr = s->nat_rule.ptr; 5867 else 5868 lr = r; 5869 PFLOG_PACKET(kif, m, AF_INET, dir, reason, lr, a, ruleset, &pd, 5870 (s == NULL)); 5871 } 5872 5873 kif->pfik_bytes[0][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 5874 kif->pfik_packets[0][dir == PF_OUT][action != PF_PASS]++; 5875 5876 if (action == PF_PASS || r->action == PF_DROP) { 5877 dirndx = (dir == PF_OUT); 5878 r->packets[dirndx]++; 5879 r->bytes[dirndx] += pd.tot_len; 5880 if (a != NULL) { 5881 a->packets[dirndx]++; 5882 a->bytes[dirndx] += pd.tot_len; 5883 } 5884 if (s != NULL) { 5885 if (s->nat_rule.ptr != NULL) { 5886 s->nat_rule.ptr->packets[dirndx]++; 5887 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 5888 } 5889 if (s->src_node != NULL) { 5890 s->src_node->packets[dirndx]++; 5891 s->src_node->bytes[dirndx] += pd.tot_len; 5892 } 5893 if (s->nat_src_node != NULL) { 5894 s->nat_src_node->packets[dirndx]++; 5895 s->nat_src_node->bytes[dirndx] += pd.tot_len; 5896 } 5897 dirndx = (dir == s->direction) ? 0 : 1; 5898 s->packets[dirndx]++; 5899 s->bytes[dirndx] += pd.tot_len; 5900 } 5901 tr = r; 5902 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 5903 if (nr != NULL && r == &V_pf_default_rule) 5904 tr = nr; 5905 if (tr->src.addr.type == PF_ADDR_TABLE) 5906 pfr_update_stats(tr->src.addr.p.tbl, 5907 (s == NULL) ? pd.src : 5908 &s->key[(s->direction == PF_IN)]-> 5909 addr[(s->direction == PF_OUT)], 5910 pd.af, pd.tot_len, dir == PF_OUT, 5911 r->action == PF_PASS, tr->src.neg); 5912 if (tr->dst.addr.type == PF_ADDR_TABLE) 5913 pfr_update_stats(tr->dst.addr.p.tbl, 5914 (s == NULL) ? pd.dst : 5915 &s->key[(s->direction == PF_IN)]-> 5916 addr[(s->direction == PF_IN)], 5917 pd.af, pd.tot_len, dir == PF_OUT, 5918 r->action == PF_PASS, tr->dst.neg); 5919 } 5920 5921 switch (action) { 5922 case PF_SYNPROXY_DROP: 5923 m_freem(*m0); 5924 case PF_DEFER: 5925 *m0 = NULL; 5926 action = PF_PASS; 5927 break; 5928 default: 5929 /* pf_route() returns unlocked. 
*/ 5930 if (r->rt) { 5931 pf_route(m0, r, dir, kif->pfik_ifp, s, &pd); 5932 return (action); 5933 } 5934 break; 5935 } 5936 if (s) 5937 PF_STATE_UNLOCK(s); 5938 5939 return (action); 5940} 5941#endif /* INET */ 5942 5943#ifdef INET6 5944int 5945pf_test6(int dir, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp) 5946{ 5947 struct pfi_kif *kif; 5948 u_short action, reason = 0, log = 0; 5949 struct mbuf *m = *m0, *n = NULL; 5950 struct ip6_hdr *h = NULL; 5951 struct pf_rule *a = NULL, *r = &V_pf_default_rule, *tr, *nr; 5952 struct pf_state *s = NULL; 5953 struct pf_ruleset *ruleset = NULL; 5954 struct pf_pdesc pd; 5955 int off, terminal = 0, dirndx, rh_cnt = 0; 5956 5957 M_ASSERTPKTHDR(m); 5958 5959 if (!V_pf_status.running) 5960 return (PF_PASS); 5961 5962 memset(&pd, 0, sizeof(pd)); 5963 pd.pf_mtag = pf_find_mtag(m); 5964 5965 if (pd.pf_mtag && pd.pf_mtag->flags & PF_TAG_GENERATED) 5966 return (PF_PASS); 5967 5968 kif = (struct pfi_kif *)ifp->if_pf_kif; 5969 if (kif == NULL) { 5970 DPFPRINTF(PF_DEBUG_URGENT, 5971 ("pf_test6: kif == NULL, if_xname %s\n", ifp->if_xname)); 5972 return (PF_DROP); 5973 } 5974 if (kif->pfik_flags & PFI_IFLAG_SKIP) 5975 return (PF_PASS); 5976 5977 PF_RULES_RLOCK(); 5978 5979 /* We do IP header normalization and packet reassembly here */ 5980 if (pf_normalize_ip6(m0, dir, kif, &reason, &pd) != PF_PASS) { 5981 action = PF_DROP; 5982 goto done; 5983 } 5984 m = *m0; /* pf_normalize messes with m0 */ 5985 h = mtod(m, struct ip6_hdr *); 5986 5987#if 1 5988 /* 5989 * we do not support jumbogram yet. if we keep going, zero ip6_plen 5990 * will do something bad, so drop the packet for now. 5991 */ 5992 if (htons(h->ip6_plen) == 0) { 5993 action = PF_DROP; 5994 REASON_SET(&reason, PFRES_NORM); /*XXX*/ 5995 goto done; 5996 } 5997#endif 5998 5999 pd.src = (struct pf_addr *)&h->ip6_src; 6000 pd.dst = (struct pf_addr *)&h->ip6_dst; 6001 pd.sport = pd.dport = NULL; 6002 pd.ip_sum = NULL; 6003 pd.proto_sum = NULL; 6004 pd.dir = dir; 6005 pd.sidx = (dir == PF_IN) ? 0 : 1; 6006 pd.didx = (dir == PF_IN) ? 
1 : 0; 6007 pd.af = AF_INET6; 6008 pd.tos = 0; 6009 pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); 6010 6011 off = ((caddr_t)h - m->m_data) + sizeof(struct ip6_hdr); 6012 pd.proto = h->ip6_nxt; 6013 do { 6014 switch (pd.proto) { 6015 case IPPROTO_FRAGMENT: 6016 action = pf_test_fragment(&r, dir, kif, m, h, 6017 &pd, &a, &ruleset); 6018 if (action == PF_DROP) 6019 REASON_SET(&reason, PFRES_FRAG); 6020 goto done; 6021 case IPPROTO_ROUTING: { 6022 struct ip6_rthdr rthdr; 6023 6024 if (rh_cnt++) { 6025 DPFPRINTF(PF_DEBUG_MISC, 6026 ("pf: IPv6 more than one rthdr\n")); 6027 action = PF_DROP; 6028 REASON_SET(&reason, PFRES_IPOPTIONS); 6029 log = 1; 6030 goto done; 6031 } 6032 if (!pf_pull_hdr(m, off, &rthdr, sizeof(rthdr), NULL, 6033 &reason, pd.af)) { 6034 DPFPRINTF(PF_DEBUG_MISC, 6035 ("pf: IPv6 short rthdr\n")); 6036 action = PF_DROP; 6037 REASON_SET(&reason, PFRES_SHORT); 6038 log = 1; 6039 goto done; 6040 } 6041 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) { 6042 DPFPRINTF(PF_DEBUG_MISC, 6043 ("pf: IPv6 rthdr0\n")); 6044 action = PF_DROP; 6045 REASON_SET(&reason, PFRES_IPOPTIONS); 6046 log = 1; 6047 goto done; 6048 } 6049 /* FALLTHROUGH */ 6050 } 6051 case IPPROTO_AH: 6052 case IPPROTO_HOPOPTS: 6053 case IPPROTO_DSTOPTS: { 6054 /* get next header and header length */ 6055 struct ip6_ext opt6; 6056 6057 if (!pf_pull_hdr(m, off, &opt6, sizeof(opt6), 6058 NULL, &reason, pd.af)) { 6059 DPFPRINTF(PF_DEBUG_MISC, 6060 ("pf: IPv6 short opt\n")); 6061 action = PF_DROP; 6062 log = 1; 6063 goto done; 6064 } 6065 if (pd.proto == IPPROTO_AH) 6066 off += (opt6.ip6e_len + 2) * 4; 6067 else 6068 off += (opt6.ip6e_len + 1) * 8; 6069 pd.proto = opt6.ip6e_nxt; 6070 /* goto the next header */ 6071 break; 6072 } 6073 default: 6074 terminal++; 6075 break; 6076 } 6077 } while (!terminal); 6078 6079 /* if there's no routing header, use unmodified mbuf for checksumming */ 6080 if (!n) 6081 n = m; 6082 6083 switch (pd.proto) { 6084 6085 case IPPROTO_TCP: { 6086 struct tcphdr th; 6087 6088 pd.hdr.tcp = &th; 6089 if (!pf_pull_hdr(m, off, &th, sizeof(th), 6090 &action, &reason, AF_INET6)) { 6091 log = action != PF_PASS; 6092 goto done; 6093 } 6094 pd.p_len = pd.tot_len - off - (th.th_off << 2); 6095 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd); 6096 if (action == PF_DROP) 6097 goto done; 6098 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd, 6099 &reason); 6100 if (action == PF_PASS) { 6101 if (pfsync_update_state_ptr != NULL) 6102 pfsync_update_state_ptr(s); 6103 r = s->rule.ptr; 6104 a = s->anchor.ptr; 6105 log = s->log; 6106 } else if (s == NULL) 6107 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 6108 &a, &ruleset, inp); 6109 break; 6110 } 6111 6112 case IPPROTO_UDP: { 6113 struct udphdr uh; 6114 6115 pd.hdr.udp = &uh; 6116 if (!pf_pull_hdr(m, off, &uh, sizeof(uh), 6117 &action, &reason, AF_INET6)) { 6118 log = action != PF_PASS; 6119 goto done; 6120 } 6121 if (uh.uh_dport == 0 || 6122 ntohs(uh.uh_ulen) > m->m_pkthdr.len - off || 6123 ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { 6124 action = PF_DROP; 6125 REASON_SET(&reason, PFRES_SHORT); 6126 goto done; 6127 } 6128 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd); 6129 if (action == PF_PASS) { 6130 if (pfsync_update_state_ptr != NULL) 6131 pfsync_update_state_ptr(s); 6132 r = s->rule.ptr; 6133 a = s->anchor.ptr; 6134 log = s->log; 6135 } else if (s == NULL) 6136 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 6137 &a, &ruleset, inp); 6138 break; 6139 } 6140 6141 case IPPROTO_ICMP: { 6142 action = PF_DROP; 6143 
DPFPRINTF(PF_DEBUG_MISC, 6144 ("pf: dropping IPv6 packet with ICMPv4 payload\n")); 6145 goto done; 6146 } 6147 6148 case IPPROTO_ICMPV6: { 6149 struct icmp6_hdr ih; 6150 6151 pd.hdr.icmp6 = &ih; 6152 if (!pf_pull_hdr(m, off, &ih, sizeof(ih), 6153 &action, &reason, AF_INET6)) { 6154 log = action != PF_PASS; 6155 goto done; 6156 } 6157 action = pf_test_state_icmp(&s, dir, kif, 6158 m, off, h, &pd, &reason); 6159 if (action == PF_PASS) { 6160 if (pfsync_update_state_ptr != NULL) 6161 pfsync_update_state_ptr(s); 6162 r = s->rule.ptr; 6163 a = s->anchor.ptr; 6164 log = s->log; 6165 } else if (s == NULL) 6166 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 6167 &a, &ruleset, inp); 6168 break; 6169 } 6170 6171 default: 6172 action = pf_test_state_other(&s, dir, kif, m, &pd); 6173 if (action == PF_PASS) { 6174 if (pfsync_update_state_ptr != NULL) 6175 pfsync_update_state_ptr(s); 6176 r = s->rule.ptr; 6177 a = s->anchor.ptr; 6178 log = s->log; 6179 } else if (s == NULL) 6180 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd, 6181 &a, &ruleset, inp); 6182 break; 6183 } 6184 6185done: 6186 PF_RULES_RUNLOCK(); 6187 if (n != m) { 6188 m_freem(n); 6189 n = NULL; 6190 } 6191 6192 /* handle dangerous IPv6 extension headers. */ 6193 if (action == PF_PASS && rh_cnt && 6194 !((s && s->state_flags & PFSTATE_ALLOWOPTS) || r->allow_opts)) { 6195 action = PF_DROP; 6196 REASON_SET(&reason, PFRES_IPOPTIONS); 6197 log = 1; 6198 DPFPRINTF(PF_DEBUG_MISC, 6199 ("pf: dropping packet with dangerous v6 headers\n")); 6200 } 6201 6202 if (s && s->tag > 0 && pf_tag_packet(m, &pd, s->tag)) { 6203 action = PF_DROP; 6204 REASON_SET(&reason, PFRES_MEMORY); 6205 } 6206 if (r->rtableid >= 0) 6207 M_SETFIB(m, r->rtableid); 6208 6209#ifdef ALTQ 6210 if (action == PF_PASS && r->qid) { 6211 if (pd.pf_mtag == NULL && 6212 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) { 6213 action = PF_DROP; 6214 REASON_SET(&reason, PFRES_MEMORY); 6215 } else { 6216 if (pd.tos & IPTOS_LOWDELAY) 6217 pd.pf_mtag->qid = r->pqid; 6218 else 6219 pd.pf_mtag->qid = r->qid; 6220 /* add hints for ecn */ 6221 pd.pf_mtag->hdr = h; 6222 } } 6223#endif /* ALTQ */ 6224 6225 if (dir == PF_IN && action == PF_PASS && (pd.proto == IPPROTO_TCP || 6226 pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && 6227 (s->nat_rule.ptr->action == PF_RDR || 6228 s->nat_rule.ptr->action == PF_BINAT) && 6229 IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)) 6230 m->m_flags |= M_SKIP_FIREWALL; 6231 6232 /* XXX: Anybody working on it?! 
*/ 6233 if (r->divert.port) 6234 printf("pf: divert(9) is not supported for IPv6\n"); 6235 6236 if (log) { 6237 struct pf_rule *lr; 6238 6239 if (s != NULL && s->nat_rule.ptr != NULL && 6240 s->nat_rule.ptr->log & PF_LOG_ALL) 6241 lr = s->nat_rule.ptr; 6242 else 6243 lr = r; 6244 PFLOG_PACKET(kif, m, AF_INET6, dir, reason, lr, a, ruleset, 6245 &pd, (s == NULL)); 6246 } 6247 6248 kif->pfik_bytes[1][dir == PF_OUT][action != PF_PASS] += pd.tot_len; 6249 kif->pfik_packets[1][dir == PF_OUT][action != PF_PASS]++; 6250 6251 if (action == PF_PASS || r->action == PF_DROP) { 6252 dirndx = (dir == PF_OUT); 6253 r->packets[dirndx]++; 6254 r->bytes[dirndx] += pd.tot_len; 6255 if (a != NULL) { 6256 a->packets[dirndx]++; 6257 a->bytes[dirndx] += pd.tot_len; 6258 } 6259 if (s != NULL) { 6260 if (s->nat_rule.ptr != NULL) { 6261 s->nat_rule.ptr->packets[dirndx]++; 6262 s->nat_rule.ptr->bytes[dirndx] += pd.tot_len; 6263 } 6264 if (s->src_node != NULL) { 6265 s->src_node->packets[dirndx]++; 6266 s->src_node->bytes[dirndx] += pd.tot_len; 6267 } 6268 if (s->nat_src_node != NULL) { 6269 s->nat_src_node->packets[dirndx]++; 6270 s->nat_src_node->bytes[dirndx] += pd.tot_len; 6271 } 6272 dirndx = (dir == s->direction) ? 0 : 1; 6273 s->packets[dirndx]++; 6274 s->bytes[dirndx] += pd.tot_len; 6275 } 6276 tr = r; 6277 nr = (s != NULL) ? s->nat_rule.ptr : pd.nat_rule; 6278 if (nr != NULL && r == &V_pf_default_rule) 6279 tr = nr; 6280 if (tr->src.addr.type == PF_ADDR_TABLE) 6281 pfr_update_stats(tr->src.addr.p.tbl, 6282 (s == NULL) ? pd.src : 6283 &s->key[(s->direction == PF_IN)]->addr[0], 6284 pd.af, pd.tot_len, dir == PF_OUT, 6285 r->action == PF_PASS, tr->src.neg); 6286 if (tr->dst.addr.type == PF_ADDR_TABLE) 6287 pfr_update_stats(tr->dst.addr.p.tbl, 6288 (s == NULL) ? pd.dst : 6289 &s->key[(s->direction == PF_IN)]->addr[1], 6290 pd.af, pd.tot_len, dir == PF_OUT, 6291 r->action == PF_PASS, tr->dst.neg); 6292 } 6293 6294 switch (action) { 6295 case PF_SYNPROXY_DROP: 6296 m_freem(*m0); 6297 case PF_DEFER: 6298 *m0 = NULL; 6299 action = PF_PASS; 6300 break; 6301 default: 6302 /* pf_route6() returns unlocked. */ 6303 if (r->rt) { 6304 pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd); 6305 return (action); 6306 } 6307 break; 6308 } 6309 6310 if (s) 6311 PF_STATE_UNLOCK(s); 6312 6313 return (action); 6314} 6315#endif /* INET6 */ 6316
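/*
 * Editor's note: callers of pf_test()/pf_test6() must treat the mbuf
 * pointer as in/out state -- the filter may rewrite, consume (divert,
 * route-to, synproxy), or free the chain.  A minimal sketch of that
 * calling convention follows; the wrapper name is hypothetical:
 */
#if 0	/* illustrative sketch only, not part of the original source */
static int
example_filter_out6(struct ifnet *ifp, struct mbuf **mp)
{
	/*
	 * PF_PASS with a non-NULL *mp is the only case in which the
	 * caller keeps transmitting; anything else means the packet
	 * was blocked or already taken over by pf.
	 */
	if (pf_test6(PF_OUT, ifp, mp, NULL) != PF_PASS || *mp == NULL) {
		if (*mp != NULL) {
			m_freem(*mp);
			*mp = NULL;
		}
		return (EACCES);
	}
	return (0);
}
#endif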