Lines Matching refs:mbuf

73 #include <sys/mbuf.h>
113 * object represents an mbuf structure. This cache preserves only
114 * the m_type field of the mbuf during its transactions.
129 * fields of the mbuf related to the external cluster are preserved
135 * fields of the mbuf related to the external cluster are preserved
146 * for all of the caches as the mbuf global lock is held most of the time.
281 * | mbuf addr | | mclaudit[i] |
327 static unsigned int mbuf_debug; /* patchable mbuf mcache flags */
334 MC_MBUF = 0, /* Regular mbuf */
338 MC_MBUF_CL, /* mbuf + cluster */
339 MC_MBUF_BIGCL, /* mbuf + large (4KB) cluster */
340 MC_MBUF_16KCL /* mbuf + jumbo (16KB) cluster */
353 * mbuf specific mcache allocation request flags.
372 * mbuf cluster slab can be converted to a regular cluster slab when all
384 u_int8_t sl_class; /* controlling mbuf class */
435 * Size of data from the beginning of an mbuf that covers m_hdr,
437 * a shadow mbuf structure of this size inside each audit structure,
438 * and the contents of the real mbuf get copied into it when the mbuf
439 * is freed. This allows us to pattern-fill the mbuf for integrity
440 * checks, and to preserve any constructed mbuf fields (e.g. mbuf +
451 ((struct mbuf *)(void *)((mcl_saved_contents_t *) \
459 * mbuf specific mcache audit flags
492 /* mbuf leak detection variables */
619 * in the cache is an mbuf with a cluster attached to it. Unlike
641 * The mbuf watchdog is enabled by default on embedded platforms. It is
648 static void m_redzone_init(struct mbuf *);
649 static void m_redzone_verify(struct mbuf *m);
664 static inline void m_incref(struct mbuf *);
665 static inline u_int32_t m_decref(struct mbuf *);
698 static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t);
699 static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *);
701 static void mcl_audit_mcheck_panic(struct mbuf *);
721 static int m_copyback0(struct mbuf **, int, int, const void *, int, int);
722 static struct mbuf *m_split0(struct mbuf *, int, int, int);
732 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL. mbufs that
735 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
758 * Macros used to verify the integrity of the mbuf.
791 * Macro to find the mbuf index relative to a base.
801 * Macros used during mbuf and cluster initialization.
859 * The structure that holds all mbuf class statistics exportable via sysctl.
861 * global mbuf lock. It contains additional information about the classes
873 * The legacy structure holding all of the mbuf allocation statistics.
875 * instead, and are updated atomically while the global mbuf lock is held.
889 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
1121 m_incref(struct mbuf *m)
1142 m_decref(struct mbuf *m)
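
m_incref() and m_decref() above maintain the reference count on an mbuf's external cluster. A minimal sketch of the pattern, assuming a hypothetical ext_ref_example structure (this file's actual representation differs, and it additionally manages the EXTF_READONLY sharing flag discussed around m_mclhasreference()):

        #include <sys/types.h>

        /* Hypothetical layout, for illustration only. */
        struct ext_ref_example {
                volatile u_int32_t refcnt;
        };

        static inline void
        example_incref(struct ext_ref_example *rfa)
        {
                (void) __sync_add_and_fetch(&rfa->refcnt, 1);
        }

        static inline u_int32_t
        example_decref(struct ext_ref_example *rfa)
        {
                /* Return the new count; when it reaches 0 the caller
                 * must free the cluster (and, for a composite object,
                 * the mbuf it is paired with). */
                return (__sync_sub_and_fetch(&rfa->refcnt, 1));
        }
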
1176 * Set aside 1/3 of the mbuf cluster map for jumbo clusters; we do
1240 m_maxlimit(MC_MBUF) = (s << NMBPCLSHIFT); /* in mbuf unit */
1242 (void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf");
1288 uint32_t nt_mbpool; /* mbuf pool size */
1318 * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM).
1326 * 64-bit kernel (mbuf pool size based on table).
1415 _CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) %
1424 _CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof (struct mbuf));
1432 /* Setup the mbuf table */
1437 mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr);
1479 /* Enable mbuf leak logging, with a lock to protect the tables */
1592 * memory allocated for mbuf clusters.
1598 /* We have at least 16M of mbuf pool */
1646 * more than one buffer chunk (e.g. mbuf slabs). For other
1689 /* Save contents on mbuf objects only */
1743 _MCHECK((struct mbuf *)buf);
1746 * an mbuf slab (formerly a 4KB cluster slab that was cut
1890 struct mbuf *m = sp->sl_head;
1897 /* Remove the slab from the mbuf class's slab list */
2160 struct mbuf *m;
2172 m = (struct mbuf *)*list;
2220 struct mbuf *m, *ms;
2242 while ((m = ms = (struct mbuf *)o) != NULL) {
2245 /* Do the mbuf sanity checks */
2283 * If we're asked to purge, restore the actual mbuf using
2285 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
2289 /* Restore constructed mbuf fields */
2305 /* Save mbuf fields and make auditing happy */
2312 /* Free the mbuf */
2348 * into composite mbuf + cluster objects.
2362 struct mbuf *m;
2398 * with MCR_FAILOK such that we don't end up sleeping at the mbuf
2435 * By this time "needed" is MIN(mbuf, cluster, ref). Any left
2439 struct mbuf *ms;
2441 m = ms = (struct mbuf *)mp_list;
2453 * If auditing is enabled, construct the shadow mbuf
2468 * the mbuf+cluster objects are constructed. This
2472 * be freed along with the mbuf it was paired with.
2581 struct mbuf *m, *ms;
2588 while ((m = ms = (struct mbuf *)list) != NULL) {
2590 /* Do the mbuf sanity checks and record its transaction */
2602 * Use the shadow mbuf in the audit structure if we are
2603 * freeing, since the contents of the actual mbuf have been
2652 * Allocate some number of mbuf clusters and place on cluster freelist.
2690 * to grow the pool asynchronously using the mbuf worker thread.
2735 * mbuf lock and the caller is okay to be blocked.
2956 struct mbuf *m = (struct mbuf *)o;
2974 /* Reinitialize it as an mbuf or 2K slab */
2998 * shadow mbuf in the audit structure
3005 struct mbuf *ms;
3026 /* Insert into the mbuf or 2k slab list */
3199 * from the freelist of other mbuf classes. Only
3235 static inline struct mbuf *
3238 struct mbuf *m;
3271 struct mbuf *
3277 struct mbuf *
3283 struct mbuf *
3289 struct mbuf *
3295 struct mbuf *
3298 struct mbuf *m;
3306 struct mbuf *
3307 m_free(struct mbuf *m)
3309 struct mbuf *n = m->m_next;
3312 panic("m_free: freeing an already freed mbuf");
3382 __private_extern__ struct mbuf *
3383 m_clattach(struct mbuf *m, int type, caddr_t extbuf,
3436 * Allocate a new mbuf, since we didn't divorce
3437 * the composite mbuf + cluster pair above.
3456 * Perform `fast' allocation of mbuf clusters from a cache of recently-freed
3459 struct mbuf *
3462 struct mbuf *m;
3500 /* m_mclget() adds an mbuf cluster to a normal mbuf */
3501 struct mbuf *
3502 m_mclget(struct mbuf *m, int wait)
3518 /* Allocate an mbuf cluster */
3531 /* Free an mbuf cluster */
3539 * mcl_hasreference() checks if a cluster of an mbuf is referenced by
3540 * another mbuf; see comments in m_incref() regarding EXTF_READONLY.
3543 m_mclhasreference(struct mbuf *m)
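
A hedged sketch of the copy-before-write idiom this check enables, using m_dup() (defined later in this file) as the private-copy mechanism; the error handling is illustrative:

        /* If the cluster is shared with another chain, writing
         * through 'm' would corrupt that chain as well, so take a
         * private deep copy first. */
        if (m_mclhasreference(m)) {
                struct mbuf *w = m_dup(m, M_DONTWAIT);
                if (w == NULL)
                        return (ENOBUFS);
                m_freem(m);
                m = w;          /* now safe to modify in place */
        }
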
3571 /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
3572 __private_extern__ struct mbuf *
3573 m_mbigget(struct mbuf *m, int wait)
3607 /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */
3608 __private_extern__ struct mbuf *
3609 m_m16kget(struct mbuf *m, int wait)
3626 * "Move" mbuf pkthdr from "from" to "to".
3630 m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
3654 * Duplicate "from"'s mbuf pkthdr in "to".
3659 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
3682 m_copy_pftag(struct mbuf *to, struct mbuf *from)
3692 m_classifier_init(struct mbuf *m, uint32_t pktf_mask)
3711 m_copy_classifier(struct mbuf *to, struct mbuf *from)
3729 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
3731 * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
3736 __private_extern__ struct mbuf *
3740 struct mbuf *m;
3741 struct mbuf **np, *top;
3773 /* Allocate the composite mbuf + cluster elements from the cache */
3783 m = (struct mbuf *)mp_list;
3846 * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if
3848 * each mbuf in the list is controlled by the parameter packetlen. Each
3849 * mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf
3854 * The actual number of segments of an mbuf chain is returned in the value
3857 __private_extern__ struct mbuf *
3861 struct mbuf **np, *top, *first = NULL;
3933 * up of exactly 1 mbuf. Otherwise, each segment chain is made up
3935 * the remaining data that cannot fit into the first mbuf.
3938 /* Allocate the elements in one shot from the mbuf cache */
3946 * mbuf (instead of a cluster) to store the residual data.
3955 struct mbuf *m;
3957 m = (struct mbuf *)mp_list;
3970 /* A second mbuf for this segment chain */
3971 m->m_next = (struct mbuf *)mp_list;
4001 * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N]
4003 * Every composite mbuf + cluster element comes from the intermediate
4037 * Attempt to allocate composite mbuf + cluster elements for
4059 * Attempt to allocate the rest of the composite mbuf + cluster
4088 struct mbuf *m;
4096 m = (struct mbuf *)mp_list;
4099 m = (struct mbuf *)rmp_list;
4173 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
4176 __private_extern__ struct mbuf *
4186 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
4189 struct mbuf *
4199 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
4201 * first num_with_pkthdrs with mbuf hdrs configured as packet headers. These
4205 struct mbuf *
4215 * Return a list of mbuf hdrs set up as packet hdrs chained together
4218 struct mbuf *
4221 struct mbuf *m;
4222 struct mbuf **np, *top;
4240 * Free an mbuf list (m_nextpkt) while following m_next. Returns the count
4244 m_freem_list(struct mbuf *m)
4246 struct mbuf *nextpkt;
4265 struct mbuf *next = m->m_next;
4270 panic("m_free: freeing an already freed mbuf");
4408 m_freem(struct mbuf *m)
4420 * of data in an mbuf.
4423 m_leadingspace(struct mbuf *m)
4436 * Compute the amount of space available after the end of data in an mbuf.
4439 m_trailingspace(struct mbuf *m)
4451 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain,
4454 struct mbuf *
4455 m_prepend(struct mbuf *m, int len, int how)
4457 struct mbuf *mn;
4477 * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to
4480 struct mbuf *
4481 m_prepend_2(struct mbuf *m, int len, int how)
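
m_leadingspace() and the two prepend routines combine into the usual header-insertion idiom; a short sketch, with hdrlen standing in for a real header size:

        /* Grow the chain headward by hdrlen bytes. */
        if (m_leadingspace(m) >= hdrlen) {
                /* Room before the current data: just move pointers. */
                m->m_data -= hdrlen;
                m->m_len += hdrlen;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len += hdrlen;
        } else {
                /* No room: m_prepend() allocates a fresh mbuf in
                 * front (freeing the chain and returning NULL if the
                 * allocation fails). */
                m = m_prepend(m, hdrlen, M_DONTWAIT);
                if (m == NULL)
                        return (ENOBUFS);
        }
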
4495 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
4496 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
4501 struct mbuf *
4502 m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode)
4504 struct mbuf *n, *mhdr = NULL, **np;
4506 struct mbuf *top;
4519 panic("m_copym: invalid mbuf chain");
4592 struct mbuf *
4593 m_copym(struct mbuf *m, int off0, int len, int wait)
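
For reference, a typical m_copym() call: the copy shares the underlying cluster storage rather than duplicating the payload, so it is cheap but must be treated as read-only (contrast m_dup() further down). Passing M_COPYALL as the length copies through to the end of the chain. The retransmission framing is illustrative:

        /* Reference 'len' bytes starting at 'off', e.g. for a
         * retransmission, without copying the payload itself. */
        struct mbuf *n = m_copym(m, off, len, M_DONTWAIT);
        if (n == NULL)
                return (ENOBUFS);       /* header allocation failed */
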
4599 * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
4600 * within this routine as well; the last mbuf and offset accessed are passed
4601 * out and can be passed back in to avoid having to rescan the entire mbuf
4604 struct mbuf *
4605 m_copym_with_hdrs(struct mbuf *m, int off0, int len0, int wait,
4606 struct mbuf **m_lastm, int *m_off, uint32_t mode)
4608 struct mbuf *n, **np = NULL;
4610 struct mbuf *top = NULL;
4654 n = (struct mbuf *)list;
4733 * Copy data from an mbuf chain starting "off" bytes from the beginning,
4737 m_copydata(struct mbuf *m, int off, int len, void *vp)
4747 panic("m_copydata: invalid mbuf chain");
4755 panic("m_copydata: invalid mbuf chain");
4766 * Concatenate mbuf chain n to m. Both chains must be of the same type
4770 m_cat(struct mbuf *m, struct mbuf *n)
4790 m_adj(struct mbuf *mp, int req_len)
4793 struct mbuf *m;
4818 * Trim from tail. Scan the mbuf chain,
4819 * calculating its length and finding the last mbuf.
4820 * If the adjustment only affects this mbuf, then just
4828 if (m->m_next == (struct mbuf *)0)
4844 * Find the mbuf with last data, adjust its length,
4863 * Rearrange an mbuf chain so that len bytes are contiguous
4864 * and in the data area of an mbuf (so that mtod and dtom
4866 * mbuf chain on success, frees it and returns null on failure.
4872 struct mbuf *
4873 m_pullup(struct mbuf *n, int len)
4875 struct mbuf *m;
4880 * If first mbuf has no cluster, and has room for len bytes
4882 * otherwise allocate a new mbuf to prepend to the chain.
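
The classic calling idiom, for reference: on failure m_pullup() frees the chain and returns NULL, so the old pointer must not be reused (hdrlen is illustrative):

        /* Guarantee the first hdrlen bytes are contiguous so that
         * mtod() can safely overlay a header structure on them. */
        if (m->m_len < hdrlen && (m = m_pullup(m, hdrlen)) == NULL)
                return (ENOBUFS);       /* chain already freed */
        caddr_t hdr = mtod(m, caddr_t); /* contiguous for hdrlen bytes */
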
4930 * Like m_pullup(), except a new mbuf is always allocated, and we allow
4931 * the amount of empty space before the data in the new mbuf to be specified
4936 __private_extern__ struct mbuf *
4937 m_copyup(struct mbuf *n, int len, int dstoff)
4939 struct mbuf *m;
4980 * Partition an mbuf chain in two pieces, returning the tail --
4984 struct mbuf *
4985 m_split(struct mbuf *m0, int len0, int wait)
4990 static struct mbuf *
4991 m_split0(struct mbuf *m0, int len0, int wait, int copyhdr)
4993 struct mbuf *m, *n;
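
A usage sketch for m_split(), e.g. cutting a packet at a segment boundary (mss is illustrative):

        /* Keep the first mss bytes in m0; get the remainder back as
         * an independent chain. */
        struct mbuf *tail = m_split(m0, mss, M_DONTWAIT);
        if (tail == NULL)
                return (ENOBUFS);
        /* m0 now ends at mss bytes; if it carried a packet header,
         * 'tail' receives one too, with its pkthdr.len adjusted. */
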
5050 struct mbuf *
5054 struct mbuf *m;
5055 struct mbuf *top = NULL, **mp = &top;
5100 * Place initial small packet/header at end of mbuf.
5160 /* Bail if we've maxed out the mbuf memory map */
5248 * Return the number of bytes in the mbuf chain, m.
5251 m_length(struct mbuf *m)
5253 struct mbuf *m0;
5266 * Copy data from a buffer back into the indicated mbuf chain,
5267 * starting "off" bytes from the beginning, extending the mbuf
5271 m_copyback(struct mbuf *m0, int off, int len, const void *cp)
5274 struct mbuf *origm = m0;
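
m_copyback() writes in the opposite direction of m_copydata(): from flat memory into the chain, extending the chain with fresh mbufs if it ends before off + len. Note that this non-COW variant may scribble on shared clusters, which is exactly what m_copyback_cow() below avoids. A one-line sketch with illustrative arguments:

        /* Overwrite len bytes at offset off with buf's contents. */
        m_copyback(m0, off, len, buf);
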
5293 struct mbuf *
5294 m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how)
5318 m_makewritable(struct mbuf **mp, int off, int len, int how)
5322 struct mbuf *n;
5350 m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags,
5354 struct mbuf *m, *n;
5355 struct mbuf **mp;
5404 * need to allocate an mbuf.
5430 * this mbuf is read-only.
5431 * allocate a new writable mbuf and try again.
5441 * an mbuf, split it first.
5456 * the previous mbuf when possible.
5460 * allocate a new mbuf. copy packet header if needed.
5562 * Dup the mbuf chain passed in. The whole thing. No cute additional cruft.
5570 struct mbuf *
5571 m_dup(struct mbuf *m, int how)
5573 struct mbuf *n, **np;
5574 struct mbuf *top;
5583 * Quick check: if we have one mbuf and its data fits in an
5584 * mbuf with packet header, just copy and go.
5587 /* Then just move the data into an mbuf and be done... */
5667 static struct mbuf *
5668 m_expand(struct mbuf *m, struct mbuf **last)
5670 struct mbuf *top = NULL;
5671 struct mbuf **nm = &top;
5682 struct mbuf *n;
5722 struct mbuf *
5723 m_normalize(struct mbuf *m)
5725 struct mbuf *top = NULL;
5726 struct mbuf **nm = &top;
5730 struct mbuf *n;
5737 struct mbuf *last;
5759 * Append the specified data to the indicated mbuf chain.
5760 * Extend the mbuf chain if the new data does not fit in
5766 m_append(struct mbuf *m0, int len, caddr_t cp)
5768 struct mbuf *m, *n;
5787 * Allocate a new mbuf; could check space
5805 struct mbuf *
5806 m_last(struct mbuf *m)
5814 m_fixhdr(struct mbuf *m0)
5826 m_length2(struct mbuf *m0, struct mbuf **last)
5828 struct mbuf *m;
5843 * Defragment an mbuf chain, returning the shortest possible chain of mbufs
5848 * If a non-packet header is passed in, the original mbuf (chain?) will
5851 * If offset is specified, the first mbuf in the chain will have a leading
5855 * mbuf chain is cleared by the caller.
5857 struct mbuf *
5858 m_defrag_offset(struct mbuf *m0, u_int32_t off, int how)
5860 struct mbuf *m_new = NULL, *m_final = NULL;
5886 * the original mbuf which will get freed upon success.
5926 struct mbuf *
5927 m_defrag(struct mbuf *m0, int how)
5933 m_mchtype(struct mbuf *m, int t)
5941 m_mtod(struct mbuf *m)
5946 struct mbuf *
5949 return ((struct mbuf *)((uintptr_t)(x) & ~(MSIZE-1)));
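
The masking above recovers the owning mbuf from a pointer into its data area. It works only because mbufs are allocated MSIZE-aligned, and only for data stored inside the mbuf itself, never for pointers into a 2KB/4KB/16KB external cluster. Illustrative use:

        /* dtom-style recovery of the header from an interior pointer. */
        caddr_t p = mtod(m, caddr_t) + 8;       /* points inside 'm' */
        struct mbuf *owner =
            (struct mbuf *)((uintptr_t)p & ~(MSIZE - 1));
        /* owner == m here; a cluster-backed data pointer would NOT
         * map back to its mbuf this way. */
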
5953 m_mcheck(struct mbuf *m)
5959 * Return a pointer to the mbuf/offset of a location in an mbuf chain.
5961 struct mbuf *
5962 m_getptr(struct mbuf *m, int loc, int *off)
6170 * Because we can run out of memory before filling the mbuf
6216 * hole in the kernel sub-map for the mbuf pool.
6425 * Given an address of a buffer (mbuf/2KB/4KB/16KB), return
6440 * For the mbuf case, find the index of the page
6441 * used by the mbuf and use that index to locate the
6443 * mbuf index relative to the page base and use
6478 struct mbuf *m = addr;
6488 /* Save constructed mbuf fields */
6502 /* Restore constructed mbuf fields */
6507 mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite)
6509 struct mbuf *ms = MCA_SAVED_MBUF_PTR(mca);
6512 struct mbuf *next = m->m_next;
6517 * We could have hand-picked the mbuf fields and restored
6520 * the mbuf layer will recheck and reinitialize anyway.
6526 * For a regular mbuf (no cluster attached) there's nothing
6536 mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca)
6591 mcl_audit_mcheck_panic(struct mbuf *m)
6598 panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s\n",
6615 /* This function turns on mbuf leak detection */
6919 /* synchronize all statistics in the mbuf table */
6977 "<mbuf type %d>\n", mbstat.m_mtypes[i], i);
6985 k = snprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
6986 "%u/%u mbuf 4KB clusters in use\n",
6994 k = snprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
7013 /* mbuf leak detection statistics */
7091 * Convert between a regular and a packet header mbuf. Caller is responsible
7095 m_reinit(struct mbuf *m, int hdr)
7105 * mbuf appears to contain user data, we cannot
7106 * safely convert this to a packet header mbuf,
7110 printf("%s: cannot set M_PKTHDR on altered mbuf %p, "
7131 m_scratch_init(struct mbuf *m)
7139 m_scratch_get(struct mbuf *m, u_int8_t **p)
7158 m_redzone_init(struct mbuf *m)
7162 * Each mbuf has a unique red zone pattern, which is an XOR
7163 * of the red zone cookie and the address of the mbuf.
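
A hedged sketch of that per-mbuf red zone computation; only the XOR-of-cookie-and-address scheme comes from the comment above, while the names below are hypothetical:

        extern u_int32_t mb_redzone_cookie; /* assumed boot-time random cookie */

        static u_int32_t
        example_redzone_pattern(struct mbuf *m)
        {
                /* XOR the secret cookie with the mbuf address so every
                 * mbuf carries a unique, hard-to-forge pattern (the
                 * cast intentionally truncates on 64-bit). */
                return (mb_redzone_cookie ^ (u_int32_t)((uintptr_t)m));
        }

        /* m_redzone_init() would store the pattern in the mbuf;
         * m_redzone_verify() recomputes it and panics on mismatch. */
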
7169 m_redzone_verify(struct mbuf *m)
7177 panic("mbuf %p redzone violation with value 0x%x "