Lines Matching refs:mbuf

73 #include <sys/mbuf.h>
110 * object represents an mbuf structure. This cache preserves only
111 * the m_type field of the mbuf during its transactions.
126 * fields of the mbuf related to the external cluster are preserved
132 * fields of the mbuf related to the external cluster are preserved
143 * for all of the caches as the mbuf global lock is held most of the time.
278 * | mbuf addr | | mclaudit[i] |
324 static unsigned int mbuf_debug; /* patchable mbuf mcache flags */
331 MC_MBUF = 0, /* Regular mbuf */
335 MC_MBUF_CL, /* mbuf + cluster */
336 MC_MBUF_BIGCL, /* mbuf + large (4KB) cluster */
337 MC_MBUF_16KCL /* mbuf + jumbo (16KB) cluster */
350 * mbuf specific mcache allocation request flags.
369 * mbuf cluster slab can be converted to a regular cluster slab when all
381 u_int8_t sl_class; /* controlling mbuf class */
420 * Size of data from the beginning of an mbuf that covers m_hdr, pkthdr
422 * mbuf structure of this size inside each audit structure, and the
423 * contents of the real mbuf gets copied into it when the mbuf is freed.
424 * This allows us to pattern-fill the mbuf for integrity check, and to
425 * preserve any constructed mbuf fields (e.g. mbuf + cluster cache case).
432 * mbuf specific mcache audit flags
463 /* mbuf leak detection variables */
593 * in the cache is an mbuf with a cluster attached to it. Unlike
615 * mbuf watchdog is enabled by default on embedded platforms. It is
637 static inline void m_incref(struct mbuf *);
638 static inline u_int32_t m_decref(struct mbuf *);
671 static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t);
672 static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *);
673 static void mcl_audit_mcheck_panic(struct mbuf *);
693 static int m_copyback0(struct mbuf **, int, int, const void *, int, int);
694 static struct mbuf *m_split0(struct mbuf *, int, int, int);
704 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL. mbufs that
707 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
730 * Macros used to verify the integrity of the mbuf.
763 * Macro to find the mbuf index relative to a base.
773 * Macros used during mbuf and cluster initialization.
829 * The structure that holds all mbuf class statistics exportable via sysctl.
831 * global mbuf lock. It contains additional information about the classes
843 * The legacy structure holding all of the mbuf allocation statistics.
845 * instead, and are updated atomically while the global mbuf lock is held.
859 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
1091 m_incref(struct mbuf *m)
1112 m_decref(struct mbuf *m)
1146 * Set aside 1/3 of the mbuf cluster map for jumbo clusters; we do
1210 m_maxlimit(MC_MBUF) = (s << NMBPCLSHIFT); /* in mbuf unit */
1212 (void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf");
1258 uint32_t nt_mbpool; /* mbuf pool size */
1288 * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM).
1296 * 64-bit kernel (mbuf pool size based on table).
1391 /* Setup the mbuf table */
1396 mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr);
1438 /* Enable mbuf leak logging, with a lock to protect the tables */
1550 * memory allocated for mbuf clusters.
1556 /* We have at least 16M of mbuf pool */
1599 * more than one buffer chunk (e.g. mbuf slabs). For other
1642 /* Save contents on mbuf objects only */
1696 _MCHECK((struct mbuf *)buf);
1699 * an mbuf slab (formerly a 4KB cluster slab that was cut
1843 struct mbuf *m = sp->sl_head;
1850 /* Remove the slab from the mbuf class's slab list */
2113 struct mbuf *m;
2125 m = (struct mbuf *)*list;
2173 struct mbuf *m, *ms;
2195 while ((m = ms = (struct mbuf *)o) != NULL) {
2198 /* Do the mbuf sanity checks */
2205 ms = (struct mbuf *)mca->mca_contents;
2236 * If we're asked to purge, restore the actual mbuf using
2238 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
2242 /* Restore constructed mbuf fields */
2258 /* Save mbuf fields and make auditing happy */
2265 /* Free the mbuf */
2301 * into composite mbuf + cluster objects.
2315 struct mbuf *m;
2351 * with MCR_FAILOK such that we don't end up sleeping at the mbuf
2388 * By this time "needed" is MIN(mbuf, cluster, ref). Any left
2392 struct mbuf *ms;
2394 m = ms = (struct mbuf *)mp_list;
2406 * If auditing is enabled, construct the shadow mbuf
2416 ms = ((struct mbuf *)mca->mca_contents);
2421 * the mbuf+cluster objects are constructed. This
2425 * be freed along with the mbuf it was paired with.
2534 struct mbuf *m, *ms;
2541 while ((m = ms = (struct mbuf *)list) != NULL) {
2543 /* Do the mbuf sanity checks and record its transaction */
2555 * Use the shadow mbuf in the audit structure if we are
2556 * freeing, since the contents of the actual mbuf have been
2560 ms = (struct mbuf *)mca->mca_contents;
2605 * Allocate some number of mbuf clusters and place on cluster freelist.
2643 * to grow the pool asynchronously using the mbuf worker thread.
2688 * mbuf lock and the caller is okay to be blocked.
2908 struct mbuf *m = (struct mbuf *)o;
2926 /* Reinitialize it as an mbuf or 2K slab */
2950 * shadow mbuf in the audit structure
2957 struct mbuf *ms;
2960 ms = ((struct mbuf *)
2979 /* Insert into the mbuf or 2k slab list */
3152 * from the freelist of other mbuf classes. Only
3188 static inline struct mbuf *
3191 struct mbuf *m;
3224 struct mbuf *
3230 struct mbuf *
3236 struct mbuf *
3242 struct mbuf *
3248 struct mbuf *
3251 struct mbuf *m;
3259 struct mbuf *
3260 m_free(struct mbuf *m)
3262 struct mbuf *n = m->m_next;
3265 panic("m_free: freeing an already freed mbuf");
3333 __private_extern__ struct mbuf *
3334 m_clattach(struct mbuf *m, int type, caddr_t extbuf,
3387 * Allocate a new mbuf, since we didn't divorce
3388 * the composite mbuf + cluster pair above.
3407 * Perform `fast' allocation mbuf clusters from a cache of recently-freed
3410 struct mbuf *
3413 struct mbuf *m;
3451 /* m_mclget() adds an mbuf cluster to a normal mbuf */
3452 struct mbuf *
3453 m_mclget(struct mbuf *m, int wait)
3469 /* Allocate an mbuf cluster */
3482 /* Free an mbuf cluster */
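A minimal sketch of how a caller typically pairs these routines (the helper name is hypothetical; MGETHDR, M_DONTWAIT, MT_DATA and the M_EXT check are the standard <sys/mbuf.h> idiom, and m_mclget() returns the mbuf whether or not the cluster attach succeeded):

    #include <sys/mbuf.h>

    /* Hypothetical helper: a packet-header mbuf backed by a 2KB cluster. */
    static struct mbuf *
    alloc_cluster_mbuf(void)
    {
            struct mbuf *m;

            MGETHDR(m, M_DONTWAIT, MT_DATA);
            if (m == NULL)
                    return (NULL);
            m = m_mclget(m, M_DONTWAIT);    /* try to attach a 2KB cluster */
            if (!(m->m_flags & M_EXT)) {    /* attach failed; only the bare mbuf remains */
                    m_freem(m);
                    return (NULL);
            }
            return (m);
    }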
3490 * mcl_hasreference() checks if a cluster of an mbuf is referenced by
3491 * another mbuf; see comments in m_incref() regarding EXTF_READONLY.
3494 m_mclhasreference(struct mbuf *m)
3522 /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
3523 __private_extern__ struct mbuf *
3524 m_mbigget(struct mbuf *m, int wait)
3558 /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */
3559 __private_extern__ struct mbuf *
3560 m_m16kget(struct mbuf *m, int wait)
3577 * "Move" mbuf pkthdr from "from" to "to".
3581 m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
3596 * Duplicate "from"'s mbuf pkthdr in "to".
3601 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
3614 m_copy_pftag(struct mbuf *to, struct mbuf *from)
3622 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
3624 * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
3629 __private_extern__ struct mbuf *
3633 struct mbuf *m;
3634 struct mbuf **np, *top;
3666 /* Allocate the composite mbuf + cluster elements from the cache */
3676 m = (struct mbuf *)mp_list;
3739 * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if
3741 * each mbuf in the list is controlled by the parameter packetlen. Each
3742 * mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf
3747 * The actual number of segments of an mbuf chain is returned in the value
3750 __private_extern__ struct mbuf *
3754 struct mbuf **np, *top, *first = NULL;
3826 * up of exactly 1 mbuf. Otherwise, each segment chain is made up
3828 * the remaining data that cannot fit into the first mbuf.
3831 /* Allocate the elements in one shot from the mbuf cache */
3839 * mbuf (instead of a cluster) to store the residual data.
3848 struct mbuf *m;
3850 m = (struct mbuf *)mp_list;
3863 /* A second mbuf for this segment chain */
3864 m->m_next = (struct mbuf *)mp_list;
3894 * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N]
3896 * Every composite mbuf + cluster element comes from the intermediate
3930 * Attempt to allocate composite mbuf + cluster elements for
3952 * Attempt to allocate the rest of the composite mbuf + cluster
3981 struct mbuf *m;
3989 m = (struct mbuf *)mp_list;
3992 m = (struct mbuf *)rmp_list;
4066 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
4069 __private_extern__ struct mbuf *
4079 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
4082 struct mbuf *
4092 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
4094 * first num_with_pkthdrs with mbuf hdrs configured as packet headers. These
4098 struct mbuf *
4108 * Return a list of mbuf hdrs set up as packet hdrs chained together
4111 struct mbuf *
4114 struct mbuf *m;
4115 struct mbuf **np, *top;
4133 * Free an mbuf list (m_nextpkt) while following m_next. Returns the count
4137 m_freem_list(struct mbuf *m)
4139 struct mbuf *nextpkt;
4158 struct mbuf *next = m->m_next;
4163 panic("m_free: freeing an already freed mbuf");
4298 m_freem(struct mbuf *m)
4310 * of data in an mbuf.
4313 m_leadingspace(struct mbuf *m)
4326 * Compute the amount of space available after the end of data in an mbuf.
4329 m_trailingspace(struct mbuf *m)
4341 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain,
4344 struct mbuf *
4345 m_prepend(struct mbuf *m, int len, int how)
4347 struct mbuf *mn;
4367 * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to
4370 struct mbuf *
4371 m_prepend_2(struct mbuf *m, int len, int how)
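A sketch of the usual prepend pattern (the wrapper and hdr parameter are invented; standard kernel headers assumed): m_prepend_2() behaves like the old M_PREPEND macro, reusing leading space when it can, otherwise allocating a fresh mbuf, and freeing the chain before returning NULL on failure:

    #include <sys/mbuf.h>
    #include <sys/errno.h>
    #include <sys/systm.h>

    /* Hypothetical: push a 'hdrlen'-byte header in front of the chain. */
    static int
    push_header(struct mbuf **mp, void *hdr, int hdrlen)
    {
            struct mbuf *m = m_prepend_2(*mp, hdrlen, M_DONTWAIT);

            if (m == NULL) {
                    *mp = NULL;             /* chain was freed on failure */
                    return (ENOBUFS);
            }
            bcopy(hdr, mtod(m, void *), hdrlen);
            *mp = m;
            return (0);
    }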
4385 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
4386 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
4391 struct mbuf *
4392 m_copym(struct mbuf *m, int off0, int len, int wait)
4394 struct mbuf *n, *mhdr = NULL, **np;
4396 struct mbuf *top;
4409 panic("m_copym: invalid mbuf chain");
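Usage sketch (the function name is invented): m_copym() produces a copy that shares the underlying clusters read-only, which is the usual way to hand the same data to a second consumer such as a tap or a retransmit queue:

    #include <sys/mbuf.h>

    /* Hypothetical: shared-cluster copy of the whole chain. */
    static struct mbuf *
    snapshot_chain(struct mbuf *m)
    {
            /* NULL when mbufs are exhausted and M_DONTWAIT forbids sleeping */
            return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
    }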
4477 * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
4478 * within this routine also, the last mbuf and offset accessed are passed
4479 * out and can be passed back in to avoid having to rescan the entire mbuf
4482 struct mbuf *
4483 m_copym_with_hdrs(struct mbuf *m, int off0, int len0, int wait,
4484 struct mbuf **m_lastm, int *m_off)
4486 struct mbuf *n, **np = NULL;
4488 struct mbuf *top = NULL;
4532 n = (struct mbuf *)list;
4606 * Copy data from an mbuf chain starting "off" bytes from the beginning,
4610 m_copydata(struct mbuf *m, int off, int len, void *vp)
4620 panic("m_copydata: invalid mbuf chain");
4628 panic("m_copydata: invalid mbuf chain");
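Usage sketch (helper name assumed): because a chain's payload may be split across several mbufs, m_copydata() is the safe way to read a region into flat storage; the explicit length check avoids the panic noted in the lines above:

    #include <sys/mbuf.h>

    /* Hypothetical: copy the first 'len' bytes of the chain into 'buf'. */
    static int
    peek_bytes(struct mbuf *m, void *buf, int len)
    {
            if (m_length(m) < (unsigned int)len)
                    return (-1);            /* chain shorter than requested */
            m_copydata(m, 0, len, buf);     /* walks mbuf boundaries as needed */
            return (0);
    }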
4639 * Concatenate mbuf chain n to m. Both chains must be of the same type
4643 m_cat(struct mbuf *m, struct mbuf *n)
4663 m_adj(struct mbuf *mp, int req_len)
4666 struct mbuf *m;
4691 * Trim from tail. Scan the mbuf chain,
4692 * calculating its length and finding the last mbuf.
4693 * If the adjustment only affects this mbuf, then just
4701 if (m->m_next == (struct mbuf *)0)
4717 * Find the mbuf with last data, adjust its length,
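Usage sketch: m_adj() trims from the head of the chain when req_len is positive and from the tail when it is negative, which is how framing is normally stripped (the wrapper and lengths are placeholders):

    #include <sys/mbuf.h>

    /* Hypothetical: remove link-level framing around the payload. */
    static void
    strip_framing(struct mbuf *m, int hdrlen, int trailerlen)
    {
            m_adj(m, hdrlen);               /* positive: trim from the head */
            m_adj(m, -trailerlen);          /* negative: trim from the tail */
    }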
4736 * Rearrange an mbuf chain so that len bytes are contiguous
4737 * and in the data area of an mbuf (so that mtod and dtom
4739 * mbuf chain on success, frees it and returns null on failure.
4745 struct mbuf *
4746 m_pullup(struct mbuf *n, int len)
4748 struct mbuf *m;
4753 * If first mbuf has no cluster, and has room for len bytes
4755 * otherwise allocate a new mbuf to prepend to the chain.
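The classic m_pullup() idiom, sketched with an assumed IPv4 input path (struct ip and the wrapper are illustration only): the header must be contiguous in the first mbuf before mtod() may cast it, and a NULL return means the chain has already been freed:

    #include <sys/mbuf.h>
    #include <netinet/ip.h>

    /* Hypothetical input-path fragment. */
    static struct ip *
    get_ip_header(struct mbuf **mp)
    {
            struct mbuf *m = *mp;

            if (m->m_len < (int)sizeof (struct ip) &&
                (m = m_pullup(m, sizeof (struct ip))) == NULL) {
                    *mp = NULL;             /* m_pullup freed the chain */
                    return (NULL);
            }
            *mp = m;
            return (mtod(m, struct ip *));
    }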
4803 * Like m_pullup(), except a new mbuf is always allocated, and we allow
4804 * the amount of empty space before the data in the new mbuf to be specified
4809 __private_extern__ struct mbuf *
4810 m_copyup(struct mbuf *n, int len, int dstoff)
4812 struct mbuf *m;
4853 * Partition an mbuf chain in two pieces, returning the tail --
4857 struct mbuf *
4858 m_split(struct mbuf *m0, int len0, int wait)
4863 static struct mbuf *
4864 m_split0(struct mbuf *m0, int len0, int wait, int copyhdr)
4866 struct mbuf *m, *n;
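Usage sketch: m_split() leaves the first len0 bytes on the chain passed in and hands back the remainder as a separate chain, or NULL if the necessary mbuf could not be allocated (the wrapper name is invented):

    #include <sys/mbuf.h>

    /* Hypothetical: detach everything past 'len' bytes into its own chain. */
    static struct mbuf *
    split_tail(struct mbuf *m0, int len)
    {
            return (m_split(m0, len, M_DONTWAIT));
    }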
4923 struct mbuf *
4927 struct mbuf *m;
4928 struct mbuf *top = NULL, **mp = &top;
4973 * Place initial small packet/header at end of mbuf.
5033 /* Bail if we've maxed out the mbuf memory map */
5121 * Return the number of bytes in the mbuf chain, m.
5124 m_length(struct mbuf *m)
5126 struct mbuf *m0;
5139 * Copy data from a buffer back into the indicated mbuf chain,
5140 * starting "off" bytes from the beginning, extending the mbuf
5144 m_copyback(struct mbuf *m0, int off, int len, const void *cp)
5147 struct mbuf *origm = m0;
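Usage sketch (the offset argument is a placeholder): m_copyback() overwrites bytes at an arbitrary offset in the chain and, per the comment above, extends the chain with fresh mbufs if the write runs past its current end:

    #include <sys/mbuf.h>

    /* Hypothetical: clear a 16-bit checksum field stored inside the chain. */
    static void
    zero_csum_field(struct mbuf *m, int csum_off)
    {
            u_int16_t zero = 0;

            m_copyback(m, csum_off, sizeof (zero), &zero);
    }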
5166 struct mbuf *
5167 m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how)
5191 m_makewritable(struct mbuf **mp, int off, int len, int how)
5195 struct mbuf *n;
5223 m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags,
5227 struct mbuf *m, *n;
5228 struct mbuf **mp;
5277 * need to allocate an mbuf.
5303 * this mbuf is read-only.
5304 * allocate a new writable mbuf and try again.
5314 * an mbuf, split it first.
5329 * the previous mbuf when possible.
5333 * allocate a new mbuf. copy packet header if needed.
5435 * Dup the mbuf chain passed in. The whole thing. No cute additional cruft.
5443 struct mbuf *
5444 m_dup(struct mbuf *m, int how)
5446 struct mbuf *n, **np;
5447 struct mbuf *top;
5456 * Quick check: if we have one mbuf and its data fits in an
5457 * mbuf with packet header, just copy and go.
5460 /* Then just move the data into an mbuf and be done... */
5540 static struct mbuf *
5541 m_expand(struct mbuf *m, struct mbuf **last)
5543 struct mbuf *top = NULL;
5544 struct mbuf **nm = &top;
5555 struct mbuf *n;
5595 struct mbuf *
5596 m_normalize(struct mbuf *m)
5598 struct mbuf *top = NULL;
5599 struct mbuf **nm = &top;
5603 struct mbuf *n;
5610 struct mbuf *last;
5632 * Append the specified data to the indicated mbuf chain,
5633 * Extend the mbuf chain if the new data does not fit in
5639 m_append(struct mbuf *m0, int len, caddr_t cp)
5641 struct mbuf *m, *n;
5660 * Allocate a new mbuf; could check space
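Usage sketch: m_append() copies len bytes onto the tail of the chain, growing the last mbuf in place when there is room and otherwise linking a new one; it returns 0 only when that allocation fails (the record arguments are placeholders):

    #include <sys/mbuf.h>
    #include <sys/errno.h>

    /* Hypothetical: append a fixed-size record to an existing chain. */
    static int
    append_record(struct mbuf *m, void *rec, int reclen)
    {
            if (m_append(m, reclen, (caddr_t)rec) == 0)
                    return (ENOBUFS);       /* could not extend the chain */
            return (0);
    }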
5678 struct mbuf *
5679 m_last(struct mbuf *m)
5687 m_fixhdr(struct mbuf *m0)
5697 m_length2(struct mbuf *m0, struct mbuf **last)
5699 struct mbuf *m;
5714 * Defragment an mbuf chain, returning the shortest possible chain of mbufs
5719 * If a non-packet header is passed in, the original mbuf (chain?) will
5722 * If offset is specified, the first mbuf in the chain will have a leading
5726 * mbuf chain is cleared by the caller.
5728 struct mbuf *
5729 m_defrag_offset(struct mbuf *m0, u_int32_t off, int how)
5731 struct mbuf *m_new = NULL, *m_final = NULL;
5758 * the original mbuf which will get freed upon success.
5797 struct mbuf *
5798 m_defrag(struct mbuf *m0, int how)
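Usage sketch: per the comments above, the original chain is freed only when defragmentation succeeds, so a caller can simply fall back to the existing chain when m_defrag() returns NULL (the wrapper is hypothetical):

    #include <sys/mbuf.h>

    /* Hypothetical: compact a chain, e.g. before programming DMA segments. */
    static struct mbuf *
    compact_chain(struct mbuf *m)
    {
            struct mbuf *d = m_defrag(m, M_DONTWAIT);

            return ((d != NULL) ? d : m);   /* on failure the original survives */
    }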
5804 m_mchtype(struct mbuf *m, int t)
5812 m_mtod(struct mbuf *m)
5817 struct mbuf *
5820 return ((struct mbuf *)((uintptr_t)(x) & ~(MSIZE-1)));
5824 m_mcheck(struct mbuf *m)
5830 * Return a pointer to mbuf/offset of location in mbuf chain.
5832 struct mbuf *
5833 m_getptr(struct mbuf *m, int loc, int *off)
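Usage sketch: m_getptr() walks to the mbuf holding byte 'loc' of the chain and reports the offset within it, which is handy for patching a single byte in place (writability of that mbuf is assumed; the helper is invented):

    #include <sys/mbuf.h>

    /* Hypothetical: overwrite one byte at chain offset 'loc'. */
    static int
    poke_byte(struct mbuf *m, int loc, u_int8_t val)
    {
            int off;
            struct mbuf *n = m_getptr(m, loc, &off);

            if (n == NULL || off >= n->m_len)
                    return (-1);            /* offset past end of data */
            *(mtod(n, u_int8_t *) + off) = val;
            return (0);
    }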
6041 * Because we can run out of memory before filling the mbuf
6087 * hole in the kernel sub-map for the mbuf pool.
6291 * Given an address of a buffer (mbuf/2KB/4KB/16KB), return
6306 * For the mbuf case, find the index of the page
6307 * used by the mbuf and use that index to locate the
6309 * mbuf index relative to the page base and use
6344 struct mbuf *m = addr;
6354 /* Save constructed mbuf fields */
6368 /* Restore constructed mbuf fields */
6373 mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite)
6375 struct mbuf *ms = (struct mbuf *)mca->mca_contents;
6378 struct mbuf *next = m->m_next;
6382 * We could have hand-picked the mbuf fields and restore
6385 * the mbuf layer will recheck and reinitialize anyway.
6391 * For a regular mbuf (no cluster attached) there's nothing
6401 mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca)
6429 mcl_audit_mcheck_panic(struct mbuf *m)
6436 panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s\n",
6453 /* This function turns on mbuf leak detection */
6757 /* synchronize all statistics in the mbuf table */
6815 "<mbuf type %d>\n", mbstat.m_mtypes[i], i);
6823 k = snprintf(c, clen, "%u/%u mbuf 2KB clusters in use\n"
6824 "%u/%u mbuf 4KB clusters in use\n",
6832 k = snprintf(c, clen, "%u/%u mbuf %uKB clusters in use\n",
6851 /* mbuf leak detection statistics */