/*-
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/netpfil/pf/pf_norm.c 344707 2019-03-01 18:12:07Z kp $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	uint16_t	fr_entries;	/* Total number of pf_fragment entries */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};
#define PF_MAX_FRENT_PER_FRAGMENT	64

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};
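/*
 * The fragment mutex below is a single global lock rather than a per-VNET
 * one; it serializes all access to the per-VNET fragment queues and trees
 * declared further down.
 */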
static struct mtx pf_frag_mtx;
MTX_SYSINIT(pf_frag_mtx, &pf_frag_mtx, "pf fragments", MTX_DEF);
#define PF_FRAG_LOCK()		mtx_lock(&pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

static VNET_DEFINE(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
static VNET_DEFINE(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
static VNET_DEFINE(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue			VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
static VNET_DEFINE(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree			VNET(pf_frag_tree)
static int		 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);
static int	pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
		    struct tcphdr *, int, sa_family_t);
static struct pf_frent *pf_create_fragment(u_short *);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static int	pf_isfull_fragment(struct pf_fragment *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static void	pf_scrub_ip(struct mbuf **, uint32_t, uint8_t, uint8_t);
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
static void	pf_scrub_ip6(struct mbuf **, uint8_t);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while(0)

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif	/* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);
}
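/*
 * Ordering function for the red-black tree of reassembly queues: compare
 * by fragment id, then protocol, then address family, and finally by the
 * source and destination addresses.
 */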
static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time_uptime -
				    V_pf_default_rule.timeout[PFTM_FRAG];

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent *frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0"));
		goto bad_fragment;
	}

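	/*
	 * Fragment offsets are expressed in units of 8 octets, so every
	 * fragment except the last must carry a payload length that is a
	 * multiple of 8 (RFC 791 for IPv4, RFC 8200 for IPv6).
	 */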
	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d" : "reass frag %#08x @ %d-%d",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_entries = 0;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment. */
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);

		return (frag);
	}

	if (frag->fr_entries >= PF_MAX_FRENT_PER_FRAGMENT)
		goto bad_fragment;

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non terminal fragments must have more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find a fragment after the current one. */
	prev = NULL;
	TAILQ_FOREACH(after, &frag->fr_queue, fr_next) {
		if (after->fe_off > frent->fe_off)
			break;
		prev = after;
	}

	KASSERT(prev != NULL || after != NULL,
	    ("prev != NULL || after != NULL"));

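	/*
	 * Trim overlaps against data that is already queued.  For example,
	 * if the previous entry covers bytes [0, 16) and this fragment
	 * arrives at offset 8 with 16 bytes, precut is 8 and the fragment
	 * is trimmed to cover [16, 24).
	 */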
	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t	precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t	aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		m_freem(after->fe_m);
		TAILQ_REMOVE(&frag->fr_queue, after, fr_next);
		uma_zfree(V_pf_frent_z, after);
	}

	if (prev == NULL)
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);

	frag->fr_entries++;

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static int
pf_isfull_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent, *next;
	uint16_t	 off, total;

	/* Check if we are completely reassembled */
	if (TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff)
		return (0);

	/* Maximum data we have seen already */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Check if we have all the data */
	off = 0;
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		off += frent->fe_len;
		if (off < total && (next == NULL || next->fe_off != off)) {
			DPFPRINTF(("missing fragment at %d, next %d, total %d",
			    off, next == NULL ? -1 : next->fe_off, total));
			return (0);
		}
	}
	DPFPRINTF(("%d < %d?", off, total));
	if (off < total)
		return (0);
	KASSERT(off == total, ("off == total"));

	return (1);
}

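/*
 * Concatenate the ordered fragment entries into a single mbuf chain.  The
 * first entry keeps its header; the headers of all following fragments are
 * stripped and their payloads appended.
 */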
static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf	*m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag))
		return (PF_PASS);  /* drop because *m0 is NULL, no error */

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif	/* INET */

#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (!pf_isfull_fragment(frag)) {
		PF_FRAG_UNLOCK();
		return (PF_PASS);  /* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
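	/*
	 * Snapshot the reassembly metadata now: pf_join_fragment() destroys
	 * the queue, yet the header length, extension offset, maximum
	 * fragment length and fragment id are still needed afterwards to
	 * rebuild the packet and fill in its mbuf tag.
	 */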
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
		TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, caddr_t) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PF_REASSEMBLED, sizeof(struct pf_fragment_tag),
	    M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif	/* INET6 */

#ifdef INET6
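/*
 * Split a previously reassembled IPv6 packet back into fragments, using the
 * header length, extension offset, maximum fragment size and fragment id
 * that pf_reassemble6() stored in the PF_REASSEMBLED mbuf tag.
 */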
int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag)
{
	struct mbuf		*m = *m0, *t;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int	off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, caddr_t) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		struct ip6_hdr *hdr;

		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d", error));
		action = PF_DROP;
	}
	for (t = m; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			ip6_forward(m, 0);
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */

#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, int dir, struct pfi_kif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf	*m = *m0;
	struct pf_rule	*r;
	struct ip	*h = mtod(m, struct ip *);
	int		 mff = (ntohs(h->ip_off) & IP_MF);
	int		 hlen = h->ip_hl << 2;
	u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 max;
	int		 ip_len;
	int		 ip_off;
	int		 tag = -1;
	int		 verdict;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	/* Fully buffer all of the fragments
	 * Might return a completely reassembled mbuf, or NULL */
	PF_FRAG_LOCK();
	DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
	verdict = pf_reassemble(m0, h, dir, reason);
	PF_FRAG_UNLOCK();

	if (verdict != PF_PASS)
		return (PF_DROP);

	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	h = mtod(m, struct ip *);

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	if (h->ip_off & ~htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	pf_scrub_ip(&m, r->rule_flag, r->min_ttl, r->set_tos);

	return (PF_PASS);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
 drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, *reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}
#endif

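/*
 * IPv6 counterpart of pf_normalize_ip(): match a scrub rule, walk the
 * extension header chain while validating hop-by-hop options and any jumbo
 * payload option, and hand fragments off to pf_reassemble6().
 */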
#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct pfi_kif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 extoff;
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL || r->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	pf_scrub_ip6(&m, r->min_ttl);

	return (PF_PASS);

 fragment:
	/* Jumbo payload packets cannot be fragmented. */
	plen = ntohs(h->ip6_plen);
	if (plen == 0 || jumbolen)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	return (PF_PASS);

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, dir, *reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}
#endif /* INET6 */

int
pf_normalize_tcp(int dir, struct pfi_kif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (pfi_kif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL || rm->action == PF_NOSCRUB)
		return (PF_PASS);
	else {
		r->packets[dir == PF_OUT]++;
		r->bytes[dir == PF_OUT] += pd->tot_len;
	}

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

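	/*
	 * The 16-bit word that follows th_ack holds the data offset, the
	 * reserved bits and the flags; sampling it before and after the
	 * change allows the incremental checksum fixup below.
	 */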
	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off, pd->af))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, dir, reason, r, NULL, NULL, pd,
		    1);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	if (state->dst.scrub)
		uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

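/*
 * Per-packet stateful scrubbing: enforce the lowest TTL seen on the
 * connection, modulate TCP timestamps and apply the PAWS-style timestamp
 * checks described in the comments below.
 */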
int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval	 uptime;
	u_int32_t	 tsval, tsecr;
	u_int		 tsval_from_last;
	u_int8_t	 hdr[60];
	u_int8_t	*opt;
	int		 copyback = 0;
	int		 got_ts = 0;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_change_proto_a(m, &opt[2],
						    &th->th_sum,
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_change_proto_a(m, &opt[6],
						    &th->th_sum, htonl(tsecr),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */

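/*
 * At the maximum allowed clock rate of 1 kHz the 32-bit timestamp space
 * wraps after 2^32 milliseconds, roughly 49.7 days; half of that space,
 * the window that matters for the wrap checks, is about 24 days.
 */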
	getmicrouptime(&uptime);
	if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
	    time_uptime - state->creation > TS_MAX_CONN)) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("src idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}
	if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
	    uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
		if (V_pf_status.debug >= PF_DEBUG_MISC) {
			DPFPRINTF(("dst idled out of PAWS\n"));
			pf_print_state(state);
			printf("\n");
		}
		dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
		    | PFSS_PAWS_IDLED;
	}

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4Gb).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says those will be discarded
		 *    since it is a dup that has already been acked.
		 *    This gives us a lowerbound on the timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC defines the max
		 *    clock rate as 1ms.  We will allow clocks to be
		 *    up to 10% fast and will allow a total difference
		 *    of 30 seconds due to a route change.  And this
		 *    gives us an upperbound on the timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will wrap its 32bit space in
		 * 24 days.  So we just disable TS checking after 24
		 * days of idle time.  We actually must use a 12d
		 * connection limit until we can come up with a better
		 * lowerbound to the TS echo check.
		 */
		struct timeval	delta_ts;
		int		ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp.  This can happen if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1Khz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0)? '3' : ' '));
			DPFPRINTF((" tsval: %u  tsecr: %u  +ticks: %u  "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u  tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u  tsecr: %u  tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent),
		 *    but we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cache, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends his data packets with or without
	 * timestamps.  And require all data packets to contain a timestamp
	 * if the first does.  PAWS implicitly requires that all data packets
	 * be timestamped.  But I think there are middle-man devices that
	 * hijack TCP streams immediately after the 3whs and don't timestamp
	 * their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet.  Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}

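/*
 * Clamp the TCP MSS option to the rule's max-mss value with an incremental
 * checksum fixup.  The rewritten options are copied back here; the nonzero
 * return tells the caller to copy back the TCP header as well.
 */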
static int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off, sa_family_t af)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		 opts[TCP_MAXOLEN];
	u_char		*optp = opts;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, af))
		return (rewrite);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_proto_cksum_fixup(m,
				    th->th_sum, *mss, htons(r->max_mss), 0);
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	if (rewrite)
		m_copyback(m, off + sizeof(*th), thoff - sizeof(*th), opts);

	return (rewrite);
}

#ifdef INET
static void
pf_scrub_ip(struct mbuf **m0, u_int32_t flags, u_int8_t min_ttl, u_int8_t tos)
{
	struct mbuf	*m = *m0;
	struct ip	*h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (flags & PFRULE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip_ttl < min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (flags & PFRULE_SET_TOS) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = tos;
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (flags & PFRULE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
#endif /* INET */

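/*
 * Unlike pf_scrub_ip(), no checksum fixup is needed here: the IPv6 header
 * carries no checksum, so enforcing a minimum hop limit is a plain store.
 */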
#ifdef INET6
static void
pf_scrub_ip6(struct mbuf **m0, u_int8_t min_ttl)
{
	struct mbuf	*m = *m0;
	struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (min_ttl && h->ip6_hlim < min_ttl)
		h->ip6_hlim = min_ttl;
}
#endif