void		 mb_free_ext(struct mbuf *);
int		 m_pkthdr_init(struct mbuf *, int);

static __inline int
m_gettype(int size)
{
	int type;

	switch (size) {
	case MSIZE:
		type = EXT_MBUF;
		break;
	case MCLBYTES:
		type = EXT_CLUSTER;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		break;
	default:
		panic("%s: invalid cluster size %d", __func__, size);
	}

	return (type);
}

/*
 * Associate an external reference-counted buffer with an mbuf.
 */
static __inline void
m_extaddref(struct mbuf *m, caddr_t buf, u_int size, u_int *ref_cnt,
    void (*freef)(struct mbuf *, void *, void *), void *arg1, void *arg2)
{

	KASSERT(ref_cnt != NULL, ("%s: ref_cnt not provided", __func__));

	atomic_add_int(ref_cnt, 1);
	m->m_flags |= M_EXT;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_cnt = ref_cnt;
	m->m_data = m->m_ext.ext_buf;
	m->m_ext.ext_size = size;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_arg1 = arg1;
	m->m_ext.ext_arg2 = arg2;
	m->m_ext.ext_type = EXT_EXTREF;
	m->m_ext.ext_flags = 0;
}

static __inline uma_zone_t
m_getzone(int size)
{
	uma_zone_t zone;

	switch (size) {
	case MCLBYTES:
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		zone = zone_jumbo16;
		break;
	default:
		panic("%s: invalid cluster size %d", __func__, size);
	}

	return (zone);
}

/*
 * Initialize an mbuf with linear storage.
 *
 * Inline because the consumer text overhead will be roughly the same to
 * initialize or call a function with this many parameters and M_PKTHDR
 * should go away with constant propagation for !MGETHDR.
 */
static __inline int
m_init(struct mbuf *m, uma_zone_t zone __unused, int size __unused, int how,
    short type, int flags)
{
	int error;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		if ((error = m_pkthdr_init(m, how)) != 0)
			return (error);
	}

	return (0);
}

static __inline struct mbuf *
m_get(int how, short type)
{
	struct mb_args args;

	args.flags = 0;
	args.type = type;
	return (uma_zalloc_arg(zone_mbuf, &args, how));
}
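
/*
 * Simple usage sketch for the allocators above (illustrative only; the error
 * handling is deliberately minimal and the ENOBUFS return is just an example
 * convention):
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);
 */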

/*
 * XXX This should be deprecated, very little use.
 */
static __inline struct mbuf *
m_getclr(int how, short type)
{
	struct mbuf *m;
	struct mb_args args;

	args.flags = 0;
	args.type = type;
	m = uma_zalloc_arg(zone_mbuf, &args, how);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}

static __inline struct mbuf *
m_gethdr(int how, short type)
{
	struct mb_args args;

	args.flags = M_PKTHDR;
	args.type = type;
	return (uma_zalloc_arg(zone_mbuf, &args, how));
}

static __inline struct mbuf *
m_getcl(int how, short type, int flags)
{
	struct mb_args args;

	args.flags = flags;
	args.type = type;
	return (uma_zalloc_arg(zone_pack, &args, how));
}

static __inline int
m_clget(struct mbuf *m, int how)
{

	KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
	    __func__, m));
	m->m_ext.ext_buf = (char *)NULL;
	uma_zalloc_arg(zone_clust, m, how);
	/*
	 * On a cluster allocation failure, drain the packet zone and retry;
	 * we might be able to loosen up a few clusters on the drain.
	 */
	if ((how & M_NOWAIT) && (m->m_ext.ext_buf == NULL)) {
		zone_drain(zone_pack);
		uma_zalloc_arg(zone_clust, m, how);
	}
	return (m->m_flags & M_EXT);
}

/*
 * m_cljget() is different from m_clget() as it can allocate clusters without
 * attaching them to an mbuf.  In that case the return value is the pointer
 * to the cluster of the requested size.  If an mbuf was specified, it gets
 * the cluster attached to it and the return value can be safely ignored.
 * For size it takes MCLBYTES, MJUMPAGESIZE, MJUM9BYTES, MJUM16BYTES.
 */
static __inline void *
m_cljget(struct mbuf *m, int how, int size)
{
	uma_zone_t zone;

	if (m != NULL) {
		KASSERT((m->m_flags & M_EXT) == 0, ("%s: mbuf %p has M_EXT",
		    __func__, m));
		m->m_ext.ext_buf = NULL;
	}

	zone = m_getzone(size);
	return (uma_zalloc_arg(zone, m, how));
}

static __inline void
m_cljset(struct mbuf *m, void *cl, int type)
{
	uma_zone_t zone;
	int size;

	switch (type) {
	case EXT_CLUSTER:
		size = MCLBYTES;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case EXT_JUMBOP:
		size = MJUMPAGESIZE;
		zone = zone_jumbop;
		break;
#endif
	case EXT_JUMBO9:
		size = MJUM9BYTES;
		zone = zone_jumbo9;
		break;
	case EXT_JUMBO16:
		size = MJUM16BYTES;
		zone = zone_jumbo16;
		break;
	default:
		panic("%s: unknown cluster type %d", __func__, type);
		break;
	}

	m->m_data = m->m_ext.ext_buf = cl;
	m->m_ext.ext_free = m->m_ext.ext_arg1 = m->m_ext.ext_arg2 = NULL;
	m->m_ext.ext_size = size;
	m->m_ext.ext_type = type;
	m->m_ext.ext_flags = 0;
	m->m_ext.ext_cnt = uma_find_refcnt(zone, cl);
	m->m_flags |= M_EXT;
}

static __inline void
m_chtype(struct mbuf *m, short new_type)
{

	m->m_type = new_type;
}

static __inline void
m_clrprotoflags(struct mbuf *m)
{

	while (m) {
		m->m_flags &= ~M_PROTOFLAGS;
		m = m->m_next;
	}
}

static __inline struct mbuf *
m_last(struct mbuf *m)
{

	while (m->m_next)
		m = m->m_next;
	return (m);
}
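
/*
 * Illustrative sketch of attaching a jumbo cluster to an existing mbuf with
 * the helpers above.  Callers that want the mbuf and cluster allocated
 * together would normally use m_getcl() or m_getjcl() instead:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m != NULL && m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) {
 *		m_freem(m);
 *		m = NULL;
 *	}
 */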

/*
 * mbuf, cluster, and external object allocation macros (for compatibility
 * purposes).
 */
#define	M_MOVE_PKTHDR(to, from)	m_move_pkthdr((to), (from))
#define	MGET(m, how, type)	((m) = m_get((how), (type)))
#define	MGETHDR(m, how, type)	((m) = m_gethdr((how), (type)))
#define	MCLGET(m, how)		m_clget((m), (how))
#define	MEXTADD(m, buf, size, free, arg1, arg2, flags, type)		\
    (void )m_extadd((m), (caddr_t)(buf), (size), (free), (arg1), (arg2),\
    (flags), (type), M_NOWAIT)
#define	m_getm(m, len, how, type)					\
    m_getm2((m), (len), (how), (type), M_PKTHDR)

/*
 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can
 * be either the local data payload or an external buffer area, depending on
 * whether M_EXT is set).
 */
#define	M_WRITABLE(m)	(!((m)->m_flags & M_RDONLY) &&			\
			 (!(((m)->m_flags & M_EXT)) ||			\
			 (*((m)->m_ext.ext_cnt) == 1)) )		\

/* Check if the supplied mbuf has a packet header, or else panic. */
#define	M_ASSERTPKTHDR(m)						\
	KASSERT((m) != NULL && (m)->m_flags & M_PKTHDR,			\
	    ("%s: no mbuf packet header!", __func__))

/*
 * Ensure that the supplied mbuf is a valid, non-free mbuf.
 *
 * XXX: Broken at the moment.  Need some UMA magic to make it work again.
 */
#define	M_ASSERTVALID(m)						\
	KASSERT((((struct mbuf *)m)->m_flags & 0) == 0,			\
	    ("%s: attempted use of a free mbuf!", __func__))

/*
 * Return the address of the start of the buffer associated with an mbuf,
 * handling external storage, packet-header mbufs, and regular data mbufs.
 */
#define	M_START(m)							\
	(((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf :			\
	 ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] :		\
	 &(m)->m_dat[0])

/*
 * Return the size of the buffer associated with an mbuf, handling external
 * storage, packet-header mbufs, and regular data mbufs.
 */
#define	M_SIZE(m)							\
	(((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size :			\
	 ((m)->m_flags & M_PKTHDR) ? MHLEN :				\
	 MLEN)

/*
 * Set the m_data pointer of a newly allocated mbuf to place an object of the
 * specified size at the end of the mbuf, longword aligned.
 *
 * NB: Historically, we had M_ALIGN(), MH_ALIGN(), and MEXT_ALIGN() as
 * separate macros, each asserting that it was called at the proper moment.
 * This required callers to themselves test the storage type and call the
 * right one.  Rather than require callers to be aware of those layout
 * decisions, we centralize here.
 */
static __inline void
m_align(struct mbuf *m, int len)
{
#ifdef INVARIANTS
	const char *msg = "%s: not a virgin mbuf";
#endif
	int adjust;

	KASSERT(m->m_data == M_START(m), (msg, __func__));

	adjust = M_SIZE(m) - len;
	m->m_data += adjust &~ (sizeof(long)-1);
}

#define	M_ALIGN(m, len)		m_align(m, len)
#define	MH_ALIGN(m, len)	m_align(m, len)
#define	MEXT_ALIGN(m, len)	m_align(m, len)
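
/*
 * Illustrative m_align() sketch: place a reply of "len" bytes at the end of
 * a freshly allocated packet-header mbuf (assumes len <= MHLEN):
 *
 *	m = m_gethdr(M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m_align(m, len);
 *	m->m_len = m->m_pkthdr.len = len;
 */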

/*
 * Compute the amount of space available before the current start of data in
 * an mbuf.
 *
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 *
 * NB: In previous versions, M_LEADINGSPACE() would only check M_WRITABLE()
 * for mbufs with external storage.  We now allow mbuf-embedded data to be
 * read-only as well.
 */
#define	M_LEADINGSPACE(m)						\
	(M_WRITABLE(m) ? ((m)->m_data - M_START(m)) : 0)

/*
 * Compute the amount of space available after the end of data in an mbuf.
 *
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 *
 * NB: In previous versions, M_TRAILINGSPACE() would only check M_WRITABLE()
 * for mbufs with external storage.  We now allow mbuf-embedded data to be
 * read-only as well.
 */
#define	M_TRAILINGSPACE(m)						\
	(M_WRITABLE(m) ?						\
	    ((M_START(m) + M_SIZE(m)) - ((m)->m_data + (m)->m_len)) : 0)

/*
 * Arrange to prepend space of size plen to mbuf m.  If a new mbuf must be
 * allocated, how specifies whether to wait.  If the allocation fails, the
 * original mbuf chain is freed and m is set to NULL.
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	MBUF_CHECKSLEEP(how);						\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)
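
/*
 * Illustrative M_PREPEND() sketch: make room for a (hypothetical) header of
 * "hdrlen" bytes in front of the current payload.  On allocation failure the
 * chain has already been freed and m is NULL:
 *
 *	M_PREPEND(m, hdrlen, M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	hdr = mtod(m, struct my_hdr *);
 */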

/*
 * Change mbuf to new type.  This is a relatively expensive operation and
 * should be avoided.
 */
#define	MCHTYPE(m, t)	m_chtype((m), (t))

/* Length to m_copy to copy all. */
#define	M_COPYALL	1000000000

/* Compatibility with 4.3. */
#define	m_copy(m, o, l)	m_copym((m), (o), (l), M_NOWAIT)

extern int max_datalen;			/* MHLEN - max_hdr */
extern int max_hdr;			/* Largest link + protocol header */
extern int max_linkhdr;			/* Largest link-level header */
extern int max_protohdr;		/* Largest protocol header */
extern int nmbclusters;			/* Maximum number of clusters */

struct uio;

void		 m_adj(struct mbuf *, int);
int		 m_apply(struct mbuf *, int, int,
		    int (*)(void *, void *, u_int), void *);
int		 m_append(struct mbuf *, int, c_caddr_t);
void		 m_cat(struct mbuf *, struct mbuf *);
void		 m_catpkt(struct mbuf *, struct mbuf *);
int		 m_extadd(struct mbuf *, caddr_t, u_int,
		    void (*)(struct mbuf *, void *, void *), void *, void *,
		    int, int, int);
struct mbuf	*m_collapse(struct mbuf *, int, int);
void		 m_copyback(struct mbuf *, int, int, c_caddr_t);
void		 m_copydata(const struct mbuf *, int, int, caddr_t);
struct mbuf	*m_copym(const struct mbuf *, int, int, int);
struct mbuf	*m_copypacket(struct mbuf *, int);
void		 m_copy_pkthdr(struct mbuf *, struct mbuf *);
struct mbuf	*m_copyup(struct mbuf *, int, int);
struct mbuf	*m_defrag(struct mbuf *, int);
void		 m_demote_pkthdr(struct mbuf *);
void		 m_demote(struct mbuf *, int, int);
struct mbuf	*m_devget(char *, int, int, struct ifnet *,
		    void (*)(char *, caddr_t, u_int));
struct mbuf	*m_dup(const struct mbuf *, int);
int		 m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
u_int		 m_fixhdr(struct mbuf *);
struct mbuf	*m_fragment(struct mbuf *, int, int);
void		 m_freem(struct mbuf *);
struct mbuf	*m_get2(int, int, short, int);
struct mbuf	*m_getjcl(int, short, int, int);
struct mbuf	*m_getm2(struct mbuf *, int, int, short, int);
struct mbuf	*m_getptr(struct mbuf *, int, int *);
u_int		 m_length(struct mbuf *, struct mbuf **);
int		 m_mbuftouio(struct uio *, struct mbuf *, int);
void		 m_move_pkthdr(struct mbuf *, struct mbuf *);
struct mbuf	*m_prepend(struct mbuf *, int, int);
void		 m_print(const struct mbuf *, int);
struct mbuf	*m_pulldown(struct mbuf *, int, int, int *);
struct mbuf	*m_pullup(struct mbuf *, int);
int		 m_sanity(struct mbuf *, int);
struct mbuf	*m_split(struct mbuf *, int, int);
struct mbuf	*m_uiotombuf(struct uio *, int, int, int, int);
struct mbuf	*m_unshare(struct mbuf *, int);

/*-
 * Network packets may have annotations attached by affixing a list of
 * "packet tags" to the pkthdr structure.  Packet tags are dynamically
 * allocated semi-opaque data structures that have a fixed header
 * (struct m_tag) that specifies the size of the memory block and a
 * <cookie,type> pair that identifies it.  The cookie is a 32-bit unique
 * unsigned value used to identify a module or ABI.  By convention this value
 * is chosen as the date+time that the module is created, expressed as the
 * number of seconds since the epoch (e.g., using date -u +'%s').  The type
 * value is an ABI/module-specific value that identifies a particular
 * annotation and is private to the module.  For compatibility with systems
 * like OpenBSD that define packet tags w/o an ABI/module cookie, the value
 * PACKET_ABI_COMPAT is used to implement m_tag_get and m_tag_find
 * compatibility shim functions and several tag types are defined below.
 * Users that do not require compatibility should use a private cookie value
 * so that packet tag-related definitions can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default memory
 * alignment implemented by malloc.  To reference private data one can use a
 * construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = (struct foo *)(mtag+1);
 *
 * if the alignment of struct m_tag is sufficient for referencing members of
 * struct foo.  Otherwise it is necessary to embed struct m_tag within the
 * private data structure to ensure proper alignment; e.g.,
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *) m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */

/*
 * Persistent tags stay with an mbuf until the mbuf is reclaimed.  Otherwise
 * tags are expected to ``vanish'' when they pass through a network
 * interface.  For most interfaces this happens normally as the tags are
 * reclaimed when the mbuf is free'd.  However in some special cases
 * reclaiming must be done manually.  An example is packets that pass through
 * the loopback interface.  Also, one must be careful to do this when
 * ``turning around'' packets (e.g., icmp_reflect).
 *
 * To mark a tag persistent, bit-or this flag in when defining the tag id.
 * The tag will then be treated as described above.
 */
#define	MTAG_PERSISTENT				0x800

#define	PACKET_TAG_NONE				0  /* Nadda */

/* Packet tags for use with PACKET_ABI_COMPAT. */
#define	PACKET_TAG_IPSEC_IN_DONE		1  /* IPsec applied, in */
#define	PACKET_TAG_IPSEC_OUT_DONE		2  /* IPsec applied, out */
#define	PACKET_TAG_IPSEC_IN_CRYPTO_DONE		3  /* NIC IPsec crypto done */
#define	PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED	4  /* NIC IPsec crypto req'ed */
#define	PACKET_TAG_IPSEC_IN_COULD_DO_CRYPTO	5  /* NIC notifies IPsec */
#define	PACKET_TAG_IPSEC_PENDING_TDB		6  /* Reminder to do IPsec */
#define	PACKET_TAG_BRIDGE			7  /* Bridge processing done */
#define	PACKET_TAG_GIF				8  /* GIF processing done */
#define	PACKET_TAG_GRE				9  /* GRE processing done */
#define	PACKET_TAG_IN_PACKET_CHECKSUM		10 /* NIC checksumming done */
#define	PACKET_TAG_ENCAP			11 /* Encap. processing */
#define	PACKET_TAG_IPSEC_SOCKET			12 /* IPSEC socket ref */
#define	PACKET_TAG_IPSEC_HISTORY		13 /* IPSEC history */
#define	PACKET_TAG_IPV6_INPUT			14 /* IPV6 input processing */
#define	PACKET_TAG_DUMMYNET			15 /* dummynet info */
#define	PACKET_TAG_DIVERT			17 /* divert info */
#define	PACKET_TAG_IPFORWARD			18 /* ipforward info */
#define	PACKET_TAG_MACLABEL	(19 | MTAG_PERSISTENT) /* MAC label */
#define	PACKET_TAG_PF		(21 | MTAG_PERSISTENT) /* PF/ALTQ information */
#define	PACKET_TAG_RTSOCKFAM			25 /* rtsock sa family */
#define	PACKET_TAG_IPOPTIONS			27 /* Saved IP options */
#define	PACKET_TAG_CARP				28 /* CARP info */
#define	PACKET_TAG_IPSEC_NAT_T_PORTS		29 /* two uint16_t */
#define	PACKET_TAG_ND_OUTGOING			30 /* ND outgoing */

/* Specific cookies and tags. */

/* Packet tag routines. */
struct m_tag	*m_tag_alloc(u_int32_t, int, int, int);
void		 m_tag_delete(struct mbuf *, struct m_tag *);
void		 m_tag_delete_chain(struct mbuf *, struct m_tag *);
void		 m_tag_free_default(struct m_tag *);
struct m_tag	*m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
struct m_tag	*m_tag_copy(struct m_tag *, int);
int		 m_tag_copy_chain(struct mbuf *, const struct mbuf *, int);
void		 m_tag_delete_nonpersistent(struct mbuf *);
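
/*
 * Illustrative packet-tag sketch; MTAG_EXAMPLE_COOKIE and the uint32_t
 * payload are made up for the example.  A tag is allocated, attached to a
 * packet, and found again later:
 *
 *	struct m_tag *mtag;
 *
 *	mtag = m_tag_alloc(MTAG_EXAMPLE_COOKIE, 0, sizeof(uint32_t), M_NOWAIT);
 *	if (mtag == NULL)
 *		return (ENOMEM);
 *	*(uint32_t *)(mtag + 1) = value;
 *	m_tag_prepend(m, mtag);
 *	...
 *	mtag = m_tag_locate(m, MTAG_EXAMPLE_COOKIE, 0, NULL);
 */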

/*
 * Initialize the list of tags associated with an mbuf.
 */
static __inline void
m_tag_init(struct mbuf *m)
{

	SLIST_INIT(&m->m_pkthdr.tags);
}

/*
 * Set up the contents of a tag.  Note that this does not fill in the free
 * method; the caller is expected to do that.
 *
 * XXX probably should be called m_tag_init, but that was already taken.
 */
static __inline void
m_tag_setup(struct m_tag *t, u_int32_t cookie, int type, int len)
{

	t->m_tag_id = type;
	t->m_tag_len = len;
	t->m_tag_cookie = cookie;
}

/*
 * Reclaim resources associated with a tag.
 */
static __inline void
m_tag_free(struct m_tag *t)
{

	(*t->m_tag_free)(t);
}

/*
 * Return the first tag associated with an mbuf.
 */
static __inline struct m_tag *
m_tag_first(struct mbuf *m)
{

	return (SLIST_FIRST(&m->m_pkthdr.tags));
}

/*
 * Return the next tag in the list of tags associated with an mbuf.
 */
static __inline struct m_tag *
m_tag_next(struct mbuf *m __unused, struct m_tag *t)
{

	return (SLIST_NEXT(t, m_tag_link));
}

/*
 * Prepend a tag to the list of tags associated with an mbuf.
 */
static __inline void
m_tag_prepend(struct mbuf *m, struct m_tag *t)
{

	SLIST_INSERT_HEAD(&m->m_pkthdr.tags, t, m_tag_link);
}

/*
 * Unlink a tag from the list of tags associated with an mbuf.
 */
static __inline void
m_tag_unlink(struct mbuf *m, struct m_tag *t)
{

	SLIST_REMOVE(&m->m_pkthdr.tags, t, m_tag, m_tag_link);
}

/* These are for OpenBSD compatibility. */
#define	MTAG_ABI_COMPAT		0		/* compatibility ABI */

static __inline struct m_tag *
m_tag_get(int type, int length, int wait)
{
	return (m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait));
}

static __inline struct m_tag *
m_tag_find(struct mbuf *m, int type, struct m_tag *start)
{
	return (SLIST_EMPTY(&m->m_pkthdr.tags) ? (struct m_tag *)NULL :
	    m_tag_locate(m, MTAG_ABI_COMPAT, type, start));
}

static __inline struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n = m->m_next;

	if ((m->m_flags & (M_PKTHDR|M_NOFREE)) == (M_PKTHDR|M_NOFREE))
		m_tag_delete_chain(m, NULL);
	if (m->m_flags & M_EXT)
		mb_free_ext(m);
	else if ((m->m_flags & M_NOFREE) == 0)
		uma_zfree(zone_mbuf, m);
	return (n);
}

static __inline int
rt_m_getfib(struct mbuf *m)
{
	KASSERT(m->m_flags & M_PKTHDR, ("Attempt to get FIB from non header mbuf."));
	return (m->m_pkthdr.fibnum);
}

#define	M_GETFIB(_m)	rt_m_getfib(_m)

#define	M_SETFIB(_m, _fib) do {						\
	KASSERT((_m)->m_flags & M_PKTHDR, ("Attempt to set FIB on non header mbuf.")); \
	((_m)->m_pkthdr.fibnum) = (_fib);				\
} while (0)

/* flags passed as first argument for "m_ether_tcpip_hash()" */
#define	MBUF_HASHFLAG_L2	(1 << 2)
#define	MBUF_HASHFLAG_L3	(1 << 3)
#define	MBUF_HASHFLAG_L4	(1 << 4)

/* mbuf hashing helper routines */
uint32_t	m_ether_tcpip_hash_init(void);
uint32_t	m_ether_tcpip_hash(const uint32_t, const struct mbuf *, const uint32_t);

#ifdef MBUF_PROFILING
 void m_profile(struct mbuf *m);
 #define M_PROFILE(m) m_profile(m)
#else
 #define M_PROFILE(m)
#endif

struct mbufq {
	STAILQ_HEAD(, mbuf)	mq_head;
	int			mq_len;
	int			mq_maxlen;
};

static inline void
mbufq_init(struct mbufq *mq, int maxlen)
{

	STAILQ_INIT(&mq->mq_head);
	mq->mq_maxlen = maxlen;
	mq->mq_len = 0;
}

static inline struct mbuf *
mbufq_flush(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	STAILQ_INIT(&mq->mq_head);
	mq->mq_len = 0;
	return (m);
}

static inline void
mbufq_drain(struct mbufq *mq)
{
	struct mbuf *m, *n;

	n = mbufq_flush(mq);
	while ((m = n) != NULL) {
		n = STAILQ_NEXT(m, m_stailqpkt);
		m_freem(m);
	}
}

static inline struct mbuf *
mbufq_first(const struct mbufq *mq)
{

	return (STAILQ_FIRST(&mq->mq_head));
}

static inline struct mbuf *
mbufq_last(const struct mbufq *mq)
{

	return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
}

static inline int
mbufq_full(const struct mbufq *mq)
{

	return (mq->mq_len >= mq->mq_maxlen);
}

static inline int
mbufq_len(const struct mbufq *mq)
{

	return (mq->mq_len);
}

static inline int
mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
{

	if (mbufq_full(mq))
		return (ENOBUFS);
	STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
	return (0);
}

static inline struct mbuf *
mbufq_dequeue(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	if (m) {
		STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
		m->m_nextpkt = NULL;
		mq->mq_len--;
	}
	return (m);
}

static inline void
mbufq_prepend(struct mbufq *mq, struct mbuf *m)
{

	STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
}
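
/*
 * Illustrative mbufq sketch (the limit of 128 packets and do_something() are
 * placeholders):
 *
 *	struct mbufq q;
 *	struct mbuf *m;
 *
 *	mbufq_init(&q, 128);
 *	if (mbufq_enqueue(&q, m) != 0)
 *		m_freem(m);
 *	...
 *	while ((m = mbufq_dequeue(&q)) != NULL)
 *		do_something(m);
 *	mbufq_drain(&q);
 */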

#endif /* _KERNEL */
#endif /* !_SYS_MBUF_H_ */