Lines Matching defs:frag

308 struct pf_fragment *frag;
312 while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
313 VERIFY(BUFFER_FRAGMENTS(frag));
314 if (frag->fr_timeout > expire)
317 switch (frag->fr_af) {
320 ntohs(frag->fr_id), frag));
324 ntohl(frag->fr_id6), frag));
330 pf_free_fragment(frag);
333 while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
334 VERIFY(!BUFFER_FRAGMENTS(frag));
335 if (frag->fr_timeout > expire)
338 switch (frag->fr_af) {
341 ntohs(frag->fr_id), frag));
345 ntohl(frag->fr_id6), frag));
351 pf_free_fragment(frag);
353 TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
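The block at 308-353 walks both descriptor queues from the tail, which is always the least recently used entry because a successful lookup re-inserts its descriptor at the head, and frees everything whose fr_timeout has fallen behind the expiry cutoff. A minimal userland sketch of that tail-first expiry, assuming an illustrative frag_entry type in place of struct pf_fragment:

#include <stdlib.h>
#include <time.h>
#include <sys/queue.h>

/* Simplified stand-in for struct pf_fragment: only the fields the
 * expiry walk needs (a timestamp and the LRU queue linkage). */
struct frag_entry {
	time_t fr_timeout;                  /* last time this entry was touched */
	TAILQ_ENTRY(frag_entry) frag_next;  /* LRU queue linkage */
};

TAILQ_HEAD(frag_queue, frag_entry);

/*
 * Free every entry older than 'ttl' seconds.  Lookups move a descriptor
 * to the head of the queue, so the tail is the least recently used entry
 * and the walk can stop at the first one that is still fresh, the same
 * shape as the two while loops at 312 and 333.
 */
static void
purge_expired(struct frag_queue *q, time_t now, time_t ttl)
{
	struct frag_entry *fe;
	time_t expire = now - ttl;

	while ((fe = TAILQ_LAST(q, frag_queue)) != NULL) {
		if (fe->fr_timeout > expire)
			break;          /* everything nearer the head is newer */
		TAILQ_REMOVE(q, fe, frag_next);
		free(fe);
	}
}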
364 struct pf_fragment *frag;
371 frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
372 if (frag == NULL)
374 pf_free_fragment(frag);
382 frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
383 if (frag == NULL)
385 pf_free_fragment(frag);
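The block at 364-385 is evidently the memory-pressure fallback: entries are evicted from the tail of each queue to make room when the fragment pools run dry. A hedged sketch of that "evict until the goal is met" loop, reusing the frag_entry/frag_queue types from the sketch above (the real code derives its goal from current pool usage; here it is just a parameter):

/* Evict from the LRU tail until at most 'goal' entries remain. */
static void
flush_fragments(struct frag_queue *q, unsigned int *nentries, unsigned int goal)
{
	struct frag_entry *fe;

	while (*nentries > goal) {
		fe = TAILQ_LAST(q, frag_queue);
		if (fe == NULL)
			break;
		TAILQ_REMOVE(q, fe, frag_next);
		free(fe);
		(*nentries)--;
	}
}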
392 pf_free_fragment(struct pf_fragment *frag)
398 if (BUFFER_FRAGMENTS(frag)) {
399 for (frent = LIST_FIRST(&frag->fr_queue); frent;
400 frent = LIST_FIRST(&frag->fr_queue)) {
408 for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
409 frcache = LIST_FIRST(&frag->fr_cache)) {
412 VERIFY(LIST_EMPTY(&frag->fr_cache) ||
413 LIST_FIRST(&frag->fr_cache)->fr_off >
421 pf_remove_fragment(frag);
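pf_free_fragment() (392-421) drains either the buffered fragment queue or the no-buffer range cache by repeatedly taking the first list element, then calls pf_remove_fragment() to drop the descriptor itself. Re-reading LIST_FIRST after every removal is what makes it safe to free the element inside the loop; a self-contained sketch with an illustrative frag_piece type:

#include <stdlib.h>
#include <sys/queue.h>

struct frag_piece {
	LIST_ENTRY(frag_piece) fr_next;   /* list linkage */
	/* ... offset/length/data for this piece ... */
};

LIST_HEAD(piece_list, frag_piece);

/* Drain a piece list by always removing the current head, mirroring the
 * two for loops at 399-400 and 408-409. */
static void
free_pieces(struct piece_list *pl)
{
	struct frag_piece *p;

	while ((p = LIST_FIRST(pl)) != NULL) {
		LIST_REMOVE(p, fr_next);
		free(p);
	}
}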
448 struct pf_fragment *frag;
450 frag = RB_FIND(pf_frag_tree, tree, key);
451 if (frag != NULL) {
453 frag->fr_timeout = pf_time_second();
454 if (BUFFER_FRAGMENTS(frag)) {
455 TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
456 TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
458 TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
459 TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
463 return (frag);
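The lookup hit path at 450-463 finds the descriptor in a red-black tree keyed by the packet's addresses, protocol and id, refreshes its timestamp, and moves it to the head of the matching queue so the tail-first expiry above stays correct. A sketch of that find-and-bump step, assuming the BSD <sys/tree.h> macros and an illustrative frag_node type keyed only by protocol and id:

#include <stdint.h>
#include <time.h>
#include <sys/queue.h>
#include <sys/tree.h>   /* BSD red-black tree macros; not part of glibc */

struct frag_node {
	uint32_t fn_id;                   /* fragment id */
	uint8_t  fn_proto;                /* upper-layer protocol */
	time_t   fn_timeout;
	RB_ENTRY(frag_node) fn_tree;      /* tree linkage */
	TAILQ_ENTRY(frag_node) fn_lru;    /* LRU queue linkage */
};

static int
frag_cmp(struct frag_node *a, struct frag_node *b)
{
	if (a->fn_proto != b->fn_proto)
		return (a->fn_proto < b->fn_proto) ? -1 : 1;
	if (a->fn_id != b->fn_id)
		return (a->fn_id < b->fn_id) ? -1 : 1;
	return 0;
}

RB_HEAD(frag_tree, frag_node);
TAILQ_HEAD(frag_lru, frag_node);
RB_GENERATE(frag_tree, frag_node, fn_tree, frag_cmp)

/* On a hit, refresh the timestamp and move the node to the queue head,
 * the same three steps as lines 453-459. */
static struct frag_node *
frag_find(struct frag_tree *tree, struct frag_lru *lru,
    struct frag_node *key, time_t now)
{
	struct frag_node *fn = RB_FIND(frag_tree, tree, key);

	if (fn != NULL) {
		fn->fn_timeout = now;
		TAILQ_REMOVE(lru, fn, fn_lru);
		TAILQ_INSERT_HEAD(lru, fn, fn_lru);
	}
	return fn;
}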
486 pf_remove_fragment(struct pf_fragment *frag)
488 if (BUFFER_FRAGMENTS(frag)) {
489 RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
490 TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
491 pool_put(&pf_frag_pl, frag);
493 RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
494 TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
495 pool_put(&pf_cache_pl, frag);
501 pf_reassemble(struct mbuf **m0, struct pf_fragment **frag,
513 VERIFY(*frag == NULL || BUFFER_FRAGMENTS(*frag));
520 if (*frag == NULL) {
521 *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
522 if (*frag == NULL) {
524 *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
525 if (*frag == NULL)
529 (*frag)->fr_flags = 0;
530 (*frag)->fr_max = 0;
531 (*frag)->fr_af = AF_INET;
532 (*frag)->fr_srcx.v4 = frent->fr_ip->ip_src;
533 (*frag)->fr_dstx.v4 = frent->fr_ip->ip_dst;
534 (*frag)->fr_p = frent->fr_ip->ip_p;
535 (*frag)->fr_id = frent->fr_ip->ip_id;
536 (*frag)->fr_timeout = pf_time_second();
537 LIST_INIT(&(*frag)->fr_queue);
539 RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
540 TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);
551 LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
603 if ((*frag)->fr_max < fr_max)
604 (*frag)->fr_max = fr_max;
607 (*frag)->fr_flags |= PFFRAG_SEENLAST;
610 LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
615 if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
620 for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
624 if (off < (*frag)->fr_max &&
628 (*frag)->fr_max));
632 DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
633 if (off < (*frag)->fr_max)
637 frent = LIST_FIRST(&(*frag)->fr_queue);
641 pf_free_fragment(*frag);
642 *frag = NULL;
664 ip->ip_src = (*frag)->fr_srcx.v4;
665 ip->ip_dst = (*frag)->fr_dstx.v4;
668 pf_remove_fragment(*frag);
669 *frag = NULL;
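pf_reassemble() (501-669) keeps the buffered pieces in a queue sorted by offset, trims overlaps as it inserts, and only splices the mbufs back together once the last fragment has been seen and the queue covers the whole datagram without holes (the loop around 615-637). A hedged abstraction of that completeness check, with illustrative piece/piece_head names:

#include <sys/queue.h>

/* One received piece of the original packet, kept sorted by offset. */
struct piece {
	LIST_ENTRY(piece) p_next;
	unsigned int p_off;   /* first byte covered by this piece */
	unsigned int p_end;   /* one past the last byte covered */
};

LIST_HEAD(piece_head, piece);

/*
 * The datagram is complete once the last fragment has been seen, the
 * first piece starts at offset 0, every piece begins exactly where the
 * previous one ended, and the final piece reaches 'total' (fr_max).
 * The insert path keeps the list sorted and overlap-free, which is what
 * makes this single linear walk sufficient.
 */
static int
reassembly_complete(const struct piece_head *ph, int seen_last,
    unsigned int total)
{
	const struct piece *p;
	unsigned int expect = 0;

	if (!seen_last)
		return 0;
	LIST_FOREACH(p, ph, p_next) {
		if (p->p_off != expect)
			return 0;         /* hole before this piece */
		expect = p->p_end;
	}
	return expect == total;           /* and no trailing hole */
}

The doubled pool_get() at 521-525 is the companion memory-pressure idiom: allocate with PR_NOWAIT, make room on failure (presumably via the flush path listed at 364-385), and retry exactly once before giving up.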
697 pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff,
707 VERIFY(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
710 if (*frag == NULL) {
711 *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
712 if (*frag == NULL) {
714 *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
715 if (*frag == NULL)
722 pool_put(&pf_cache_pl, *frag);
723 *frag = NULL;
728 (*frag)->fr_flags = PFFRAG_NOBUFFER;
729 (*frag)->fr_max = 0;
730 (*frag)->fr_af = AF_INET;
731 (*frag)->fr_srcx.v4 = h->ip_src;
732 (*frag)->fr_dstx.v4 = h->ip_dst;
733 (*frag)->fr_p = h->ip_p;
734 (*frag)->fr_id = h->ip_id;
735 (*frag)->fr_timeout = pf_time_second();
739 LIST_INIT(&(*frag)->fr_cache);
740 LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
742 RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
743 TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
756 LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
794 /* Update the previous frag to encompass this one */
940 * free the overall descriptor. Thus we drop the frag late.
948 if ((*frag)->fr_max < fr_max)
949 (*frag)->fr_max = fr_max;
953 (*frag)->fr_flags |= PFFRAG_SEENLAST;
956 if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
957 LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
958 LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
961 (*frag)->fr_max));
962 pf_free_fragment(*frag);
963 *frag = NULL;
972 if (!mff && *frag != NULL)
973 (*frag)->fr_flags |= PFFRAG_SEENLAST;
981 if (!mff && *frag != NULL)
982 (*frag)->fr_flags |= PFFRAG_SEENLAST;
986 if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
989 (*frag)->fr_flags |= PFFRAG_DROP;
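pf_fragcache() (697-989) is the no-buffer variant used when fragments are passed through rather than reassembled: it only records which byte ranges have been seen on the fr_cache list, extends an existing range when the new fragment overlaps or abuts it ("Update the previous frag to encompass this one", 794), and flags the descriptor with PFFRAG_DROP when it decides the fragment stream can no longer be trusted (986-989). A simplified sketch of that range bookkeeping; it only handles merging with the preceding range, whereas the real code also reconciles against the ranges that follow:

#include <stdlib.h>
#include <sys/queue.h>

/* A byte range already seen; only coverage is tracked, not the data. */
struct seen_range {
	LIST_ENTRY(seen_range) r_next;
	unsigned int r_off;
	unsigned int r_end;
};

LIST_HEAD(range_head, seen_range);

/* Record [off, end) in a list kept sorted by offset.
 * Returns -1 on allocation failure, 0 otherwise. */
static int
range_note(struct range_head *rh, unsigned int off, unsigned int end)
{
	struct seen_range *r, *prev = NULL, *n;

	/* find the last range that starts at or before 'off' */
	LIST_FOREACH(r, rh, r_next) {
		if (r->r_off > off)
			break;
		prev = r;
	}
	if (prev != NULL && off <= prev->r_end) {
		/* overlaps or abuts the previous range: grow it in place */
		if (end > prev->r_end)
			prev->r_end = end;
		return 0;
	}
	if ((n = malloc(sizeof (*n))) == NULL)
		return -1;
	n->r_off = off;
	n->r_end = end;
	if (prev != NULL)
		LIST_INSERT_AFTER(prev, n, r_next);
	else
		LIST_INSERT_HEAD(rh, n, r_next);
	return 0;
}

Completion is then the check at 956-958: the last fragment has been seen and the first cached range already runs from 0 to fr_max.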
1000 pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag,
1008 VERIFY(*frag == NULL || BUFFER_FRAGMENTS(*frag));
1016 DPFPRINTF(("%p IPv6 frag plen %u off %u fr_ip6f_hlen %u fr_max %u m_len %u\n", m,
1024 if (*frag == NULL) {
1025 *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
1026 if (*frag == NULL) {
1028 *frag = pool_get(&pf_frag_pl, PR_NOWAIT);
1029 if (*frag == NULL)
1033 (*frag)->fr_flags = 0;
1034 (*frag)->fr_max = 0;
1035 (*frag)->fr_af = AF_INET6;
1036 (*frag)->fr_srcx.v6 = frent->fr_ip6->ip6_src;
1037 (*frag)->fr_dstx.v6 = frent->fr_ip6->ip6_dst;
1038 (*frag)->fr_p = frent->fr_ip6f_opt.ip6f_nxt;
1039 (*frag)->fr_id6 = frent->fr_ip6f_opt.ip6f_ident;
1040 (*frag)->fr_timeout = pf_time_second();
1041 LIST_INIT(&(*frag)->fr_queue);
1043 RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag);
1044 TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next);
1055 LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
1108 if ((*frag)->fr_max < fr_max)
1109 (*frag)->fr_max = fr_max;
1112 (*frag)->fr_flags |= PFFRAG_SEENLAST;
1115 LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next);
1120 if (!((*frag)->fr_flags & PFFRAG_SEENLAST))
1125 for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) {
1130 (*frag)->fr_max));
1131 if (off < (*frag)->fr_max &&
1135 (*frag)->fr_max));
1139 DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max));
1140 if (off < (*frag)->fr_max)
1144 frent = LIST_FIRST(&(*frag)->fr_queue);
1148 pf_free_fragment(*frag);
1149 *frag = NULL;
1154 ip6->ip6_nxt = (*frag)->fr_p;
1156 ip6->ip6_src = (*frag)->fr_srcx.v6;
1157 ip6->ip6_dst = (*frag)->fr_dstx.v6;
1160 pf_remove_fragment(*frag);
1161 *frag = NULL;
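pf_reassemble6() (1000-1161) mirrors the IPv4 path; the v6-specific step is at the end, where the fragment header is gone and the surviving base header must be patched: ip6_nxt gets the upper-layer protocol that was saved in fr_p when the descriptor was created (1038, 1154), and the payload length and addresses are restored (1156-1157). A small hedged sketch of that fix-up, with an illustrative function name:

#include <stdint.h>
#include <netinet/in.h>
#include <netinet/ip6.h>

/*
 * Patch the reassembled IPv6 header: the extension-header chain no longer
 * ends in a fragment header, so the base header must name the original
 * upper-layer protocol and the new payload length (host order here).
 */
static void
fixup_reassembled_header(struct ip6_hdr *ip6, uint8_t saved_nxt,
    uint16_t plen)
{
	ip6->ip6_nxt  = saved_nxt;     /* protocol saved before reassembly */
	ip6->ip6_plen = htons(plen);   /* excludes the 40-byte base header */
}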
1203 struct pf_fragment **frag, int hlen, int mff, int drop, int *nomem)
1210 VERIFY(*frag == NULL || !BUFFER_FRAGMENTS(*frag));
1238 if (*frag == NULL) {
1239 *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
1240 if (*frag == NULL) {
1242 *frag = pool_get(&pf_cache_pl, PR_NOWAIT);
1243 if (*frag == NULL)
1250 pool_put(&pf_cache_pl, *frag);
1251 *frag = NULL;
1256 (*frag)->fr_flags = PFFRAG_NOBUFFER;
1257 (*frag)->fr_max = 0;
1258 (*frag)->fr_af = AF_INET6;
1259 (*frag)->fr_srcx.v6 = h->ip6_src;
1260 (*frag)->fr_dstx.v6 = h->ip6_dst;
1261 (*frag)->fr_p = fh->ip6f_nxt;
1262 (*frag)->fr_id6 = fh->ip6f_ident;
1263 (*frag)->fr_timeout = pf_time_second();
1267 LIST_INIT(&(*frag)->fr_cache);
1268 LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next);
1270 RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag);
1271 TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next);
1284 LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) {
1323 /* Update the previous frag to encompass this one */
1470 * free the overall descriptor. Thus we drop the frag late.
1477 if ((*frag)->fr_max < fr_max)
1478 (*frag)->fr_max = fr_max;
1482 (*frag)->fr_flags |= PFFRAG_SEENLAST;
1485 if (((*frag)->fr_flags & PFFRAG_SEENLAST) &&
1486 LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 &&
1487 LIST_FIRST(&(*frag)->fr_cache)->fr_end == (*frag)->fr_max) {
1490 ntohl(fh->ip6f_ident), (*frag)->fr_max));
1491 pf_free_fragment(*frag);
1492 *frag = NULL;
1501 if (!mff && *frag != NULL)
1502 (*frag)->fr_flags |= PFFRAG_SEENLAST;
1510 if (!mff && *frag != NULL)
1511 (*frag)->fr_flags |= PFFRAG_SEENLAST;
1515 if (((*frag)->fr_flags & PFFRAG_DROP) == 0)
1518 (*frag)->fr_flags |= PFFRAG_DROP;
1532 struct pf_fragment *frag = NULL;
1629 frag = pf_find_fragment_by_ipv4_header(h, &pf_frag_tree);
1631 if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
1632 fr_max > frag->fr_max)
1646 DPFPRINTF(("reass IPv4 frag %d @ %d-%d\n", ntohs(h->ip_id),
1648 *m0 = m = pf_reassemble(m0, &frag, frent, mff);
1665 if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
1682 frag = pf_find_fragment_by_ipv4_header(h, &pf_cache_tree);
1685 if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
1686 fr_max > frag->fr_max) {
1688 frag->fr_flags |= PFFRAG_DROP;
1692 *m0 = m = pf_fragcache(m0, h, &frag, mff,
1715 if (frag != NULL && (frag->fr_flags & PFFRAG_DROP))
1779 if (frag != NULL)
1780 pf_free_fragment(frag);
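The caller at 1532-1780 is the IPv4 normalization path: it computes the fragment offset, the more-fragments bit and fr_max from the IP header, looks the packet up with pf_find_fragment_by_ipv4_header(), and then hands it to either pf_reassemble() (1648) or pf_fragcache() (1692). A sketch of roughly how those three values fall out of struct ip; the frag_info type and ip_frag_info() name are illustrative:

#include <netinet/in.h>
#include <netinet/ip.h>

struct frag_info {
	unsigned int fragoff;   /* byte offset of this fragment's payload */
	int          mff;       /* non-zero if IP_MF (more fragments) is set */
	unsigned int fr_max;    /* highest payload byte this fragment accounts for */
};

/* All IP header fields arrive in network byte order. */
static struct frag_info
ip_frag_info(const struct ip *h)
{
	struct frag_info fi;
	unsigned int hlen = h->ip_hl << 2;         /* header length in bytes */
	unsigned int off = ntohs(h->ip_off);

	fi.fragoff = (off & IP_OFFMASK) << 3;      /* offset field counts 8-byte units */
	fi.mff = (off & IP_MF) != 0;
	fi.fr_max = fi.fragoff + ntohs(h->ip_len) - hlen;
	return fi;
}

The checks at 1631-1632 and 1685-1688 then use fr_max directly: a fragment that extends past the end already established by the last fragment marks the whole datagram for dropping.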
1806 struct ip6_frag frag;
1973 if (!pf_pull_hdr(m, off, &frag, sizeof (frag), NULL, NULL, AF_INET6))
1975 fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
1976 pd->proto = frag.ip6f_nxt;
1977 mff = ntohs(frag.ip6f_offlg & IP6F_MORE_FRAG);
1978 off += sizeof frag;
1983 DPFPRINTF(("%p IPv6 frag plen %u mff %d off %u fragoff %u fr_max %u\n", m,
1990 pff = pf_find_fragment_by_ipv6_header(h, &frag,
2007 frent->fr_ip6f_opt = frag;
2011 DPFPRINTF(("reass IPv6 frag %d @ %d-%d\n",
2012 ntohl(frag.ip6f_ident), fragoff, fr_max));
2027 pff = pf_find_fragment_by_ipv6_header(h, &frag,
2038 *m0 = m = pf_frag6cache(m0, h, &frag, &pff, off, mff,
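The IPv6 counterpart at 1806-2038 first reads the ip6_frag extension header with pf_pull_hdr() (1973) and decodes it (1975-1977) before choosing between pf_reassemble6() and pf_frag6cache(). The masks are defined in network byte order, which is why the code masks before byte-swapping; a self-contained sketch of that decode, with an illustrative function name:

#include <stdint.h>
#include <netinet/in.h>
#include <netinet/ip6.h>

/*
 * Decode an IPv6 fragment header the way lines 1975-1977 do.  The offset
 * occupies the top 13 bits of ip6f_offlg and is counted in 8-byte units,
 * so masking with IP6F_OFF_MASK (a network-order constant) and then
 * byte-swapping yields the offset directly in bytes.
 */
static void
decode_ip6_frag(const struct ip6_frag *fh, unsigned int *fragoff, int *mff,
    uint8_t *nxt)
{
	*fragoff = ntohs(fh->ip6f_offlg & IP6F_OFF_MASK);   /* byte offset */
	*mff = (fh->ip6f_offlg & IP6F_MORE_FRAG) != 0;      /* more to come? */
	*nxt = fh->ip6f_nxt;         /* header that follows once reassembled */
}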