Lines Matching refs:mbuf in /macosx-10.5.8/xnu-1228.15.4/bsd/kern/

73 #include <sys/mbuf.h>
108 * object represents an mbuf structure. This cache preserves only
109 * the m_type field of the mbuf during its transactions.
124 * fields of the mbuf related to the external cluster are preserved
130 * fields of the mbuf related to the external cluster are preserved
141 * for all of the caches as the mbuf global lock is held most of the time.
275 * | mbuf addr | | mclaudit[i] |
319 static unsigned int mbuf_debug; /* patchable mbuf mcache flags */
323 MC_MBUF = 0, /* Regular mbuf */
327 MC_MBUF_CL, /* mbuf + cluster */
328 MC_MBUF_BIGCL, /* mbuf + large (4K) cluster */
329 MC_MBUF_16KCL /* mbuf + jumbo (16K) cluster */
342 * mbuf specific mcache allocation request flags.
361 * mbuf cluster slab can be converted to a regular cluster slab when all
371 u_int8_t sl_class; /* controlling mbuf class */
409 * Size of data from the beginning of an mbuf that covers m_hdr, pkthdr
411 * mbuf structure of this size inside each audit structure, and the
412 * contents of the real mbuf gets copied into it when the mbuf is freed.
413 * This allows us to pattern-fill the mbuf for integrity check, and to
414 * preserve any constructed mbuf fields (e.g. mbuf + cluster cache case).
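
The scheme at lines 409-414 can be shown in miniature: keep a shadow copy of
the header region inside the audit structure, pattern-fill the whole buffer on
free, and verify the pattern on the next allocation. A self-contained sketch of
the idea; the names (audit_save, audit_restore), the sizes, and the fill byte
are illustrative stand-ins for the real mcl_audit_* machinery, not kernel code:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define MBUF_SIZE    256    /* stand-in for MSIZE */
    #define SHADOW_LEN    64    /* bytes preserved; stands in for m_hdr + pkthdr */
    #define FREE_PATTERN 0xdb   /* fill byte for freed buffers */

    struct audit {              /* stand-in for mcache_audit_t */
        uint8_t shadow[SHADOW_LEN];
    };

    /* On free: preserve the constructed header fields, then pattern-fill. */
    static void
    audit_save(struct audit *a, uint8_t *buf)
    {
        memcpy(a->shadow, buf, SHADOW_LEN);
        memset(buf, FREE_PATTERN, MBUF_SIZE);
    }

    /* On alloc: verify the freed buffer was never written, then restore. */
    static void
    audit_restore(struct audit *a, uint8_t *buf)
    {
        for (int i = 0; i < MBUF_SIZE; i++)
            assert(buf[i] == FREE_PATTERN);  /* use-after-free check */
        memcpy(buf, a->shadow, SHADOW_LEN);
    }
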
425 * mbuf specific mcache audit flags
511 * in the cache is an mbuf with a cluster attached to it. Unlike
534 static inline void m_incref(struct mbuf *);
535 static inline u_int32_t m_decref(struct mbuf *);
566 static void mcl_audit_restore_mbuf(struct mbuf *, mcache_audit_t *, boolean_t);
567 static void mcl_audit_save_mbuf(struct mbuf *, mcache_audit_t *);
568 static void mcl_audit_mcheck_panic(struct mbuf *);
583 * mbuf + cluster caches, i.e. MC_MBUF_CL and MC_MBUF_BIGCL. mbufs that
586 * into the mbuf and cluster freelists, the composite mbuf + cluster objects
601 * Macros used to verify the integrity of the mbuf.
633 * Macro to find the mbuf index relative to the cluster base.
638 * Macros used during mbuf and cluster initialization.
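
The index macro at line 633 reduces to pointer arithmetic: mbufs in a
cluster-backed slab sit at MSIZE-byte strides, so the index is the byte offset
from the cluster base divided by MSIZE. A self-contained illustration; the
MSIZE value and the helper name mtoindex are assumptions for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define MSIZE 256                    /* assumed mbuf size */

    /* Index of the mbuf at address m within the cluster starting at base. */
    static unsigned int
    mtoindex(const void *base, const void *m)
    {
        return (unsigned int)(((uintptr_t)m - (uintptr_t)base) / MSIZE);
    }

    int
    main(void)
    {
        char cluster[2048];              /* a 2K cluster: 2048/256 = 8 mbufs */
        printf("%u\n", mtoindex(cluster, cluster + 3 * MSIZE));  /* prints 3 */
        return 0;
    }
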
691 * The structure that holds all mbuf class statistics exportable via sysctl.
693 * global mbuf lock. It contains additional information about the classes
702 * The legacy structure holding all of the mbuf allocation statistics.
704 * instead, and are updated atomically while the global mbuf lock is held.
718 * Allocation statistics related to mbuf types (up to MT_MAX-1) are updated
862 m_incref(struct mbuf *m)
875 m_decref(struct mbuf *m)
904 * Set aside 1/3 of the mbuf cluster map for jumbo clusters; we do
950 (void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf");
1012 /* Setup the mbuf table */
1017 mbuf_mlock_grp = lck_grp_alloc_init("mbuf", mbuf_mlock_grp_attr);
1136 * more than one buffer chunk (e.g. mbuf slabs). For other
1176 /* Save contents on mbuf objects only */
1238 _MCHECK((struct mbuf *)buf);
1241 * an mbuf slab (formerly a 2K cluster slab that was cut
1332 * An mbuf slab has a total of NMBPL reference counts.
1377 struct mbuf *m = sp->sl_head;
1384 /* Remove the slab from the mbuf class's slab list */
1603 struct mbuf *m;
1615 m = (struct mbuf *)*list;
1662 struct mbuf *m, *ms;
1674 while ((m = ms = (struct mbuf *)o) != NULL) {
1677 /* Do the mbuf sanity checks */
1681 ms = (struct mbuf *)mca->mca_contents;
1719 * If we're asked to purge, restore the actual mbuf using
1721 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
1725 /* Restore constructed mbuf fields */
1741 /* Save mbuf fields and make auditing happy */
1748 /* Free the mbuf */
1784 * into composite mbuf + cluster objects.
1798 struct mbuf *m;
1834 * with MCR_FAILOK such that we don't end up sleeping at the mbuf
1865 * By this time "needed" is MIN(mbuf, cluster, ref). Any left
1869 struct mbuf *ms;
1871 m = ms = (struct mbuf *)mp_list;
1883 * If auditing is enabled, construct the shadow mbuf
1894 ms = ((struct mbuf *)mca->mca_contents);
1899 * the mbuf+cluster objects are constructed. This
1903 * be freed along with the mbuf it was paired with.
2005 struct mbuf *m, *ms;
2012 while ((m = ms = (struct mbuf *)list) != NULL) {
2014 /* Do the mbuf sanity checks and record its transaction */
2024 * Use the shadow mbuf in the audit structure if we are
2025 * freeing, since the contents of the actual mbuf have been
2029 ms = (struct mbuf *)mca->mca_contents;
2073 * Allocate some number of mbuf clusters and place on cluster freelist.
2111 * to grow the pool asynchronously using the mbuf worker thread.
2148 * mbuf lock and the caller is okay to be blocked. For
2430 struct mbuf *m = (struct mbuf *)o;
2443 /* Reinitialize it as an mbuf slab */
2447 VERIFY(m == (struct mbuf *)sp->sl_base);
2458 * If auditing is enabled, construct the shadow mbuf
2464 struct mbuf *ms;
2467 ms = ((struct mbuf *)mca->mca_contents);
2476 /* Insert it into the mbuf class's slab list */
2628 * from the freelist of other mbuf classes. Only
2664 static inline struct mbuf *
2667 struct mbuf *m;
2700 struct mbuf *
2706 struct mbuf *
2712 struct mbuf *
2718 struct mbuf *
2724 struct mbuf *
2727 struct mbuf *m;
2735 struct mbuf *
2736 m_free(struct mbuf *m)
2738 struct mbuf *n = m->m_next;
2741 panic("m_free: freeing an already freed mbuf");
2807 __private_extern__ struct mbuf *
2808 m_clattach(struct mbuf *m, int type, caddr_t extbuf,
2858 * Allocate a new mbuf, since we didn't divorce
2859 * the composite mbuf + cluster pair above.
2877 /* m_mclget() adds an mbuf cluster to a normal mbuf */
2878 struct mbuf *
2879 m_mclget(struct mbuf *m, int wait)
2895 /* Allocate an mbuf cluster */
2908 /* Free an mbuf cluster */
2916 * m_mclhasreference() checks if the cluster of an mbuf is referenced by
2917 * another mbuf
2920 m_mclhasreference(struct mbuf *m)
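
Since a cluster may be shared, code that wants to scribble on cluster data in
place typically tests m_mclhasreference() and copies first. A hedged sketch of
that caller pattern; make_writable is hypothetical, while m_dup() (line 4514)
and m_freem() (line 3707) are the routines declared elsewhere in this file:

    /* Sketch: return a privately owned version of m before editing it. */
    static struct mbuf *
    make_writable(struct mbuf *m, int how)
    {
        if (m_mclhasreference(m)) {
            /* Cluster is shared with another mbuf; work on a copy. */
            struct mbuf *n = m_dup(m, how);
            if (n == NULL)
                return (NULL);
            m_freem(m);
            m = n;
        }
        return (m);
    }
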
2948 /* m_mbigget() adds a 4KB mbuf cluster to a normal mbuf */
2949 __private_extern__ struct mbuf *
2950 m_mbigget(struct mbuf *m, int wait)
2984 /* m_m16kget() adds a 16KB mbuf cluster to a normal mbuf */
2985 __private_extern__ struct mbuf *
2986 m_m16kget(struct mbuf *m, int wait)
3004 m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
3019 * Duplicate "from"'s mbuf pkthdr in "to".
3024 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
3039 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
3041 * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
3046 __private_extern__ struct mbuf *
3050 struct mbuf *m;
3051 struct mbuf **np, *top;
3083 /* Allocate the composite mbuf + cluster elements from the cache */
3093 m = (struct mbuf *)mp_list;
3150 * Return a list of mbufs linked by m_nextpkt. Try for numlist, and if
3152 * each mbuf in the list is controlled by the parameter packetlen. Each
3153 * mbuf of the list may have a chain of mbufs linked by m_next. Each mbuf
3158 * The actual number of segments of an mbuf chain is returned in the value
3161 __private_extern__ struct mbuf *
3165 struct mbuf **np, *top, *first = NULL;
3237 * up of exactly 1 mbuf. Otherwise, each segment chain is made up
3239 * the remaining data that cannot fit into the first mbuf.
3242 /* Allocate the elements in one shot from the mbuf cache */
3250 * mbuf (instead of a cluster) to store the residual data.
3259 struct mbuf *m;
3261 m = (struct mbuf *)mp_list;
3274 /* A second mbuf for this segment chain */
3275 m->m_next = (struct mbuf *)mp_list;
3305 * [mbuf + cluster 1] [mbuf + cluster 2] ... [mbuf + cluster N]
3307 * Every composite mbuf + cluster element comes from the intermediate
3341 * Attempt to allocate composite mbuf + cluster elements for
3363 * Attempt to allocate the rest of the composite mbuf + cluster
3392 struct mbuf *m;
3400 m = (struct mbuf *)mp_list;
3403 m = (struct mbuf *)rmp_list;
3477 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
3480 __private_extern__ struct mbuf *
3490 * Best effort to get an mbuf cluster + pkthdr. Used by drivers to allocate
3493 struct mbuf *
3503 * Return a list of mbuf hdrs that point to clusters. Try for num_needed;
3505 * first num_with_pkthdrs with mbuf hdrs configured as packet headers. These
3509 struct mbuf *
3519 * Return a list of mbuf hdrs set up as packet hdrs chained together
3522 struct mbuf *
3525 struct mbuf *m;
3526 struct mbuf **np, *top;
3544 * Free an mbuf list (m_nextpkt) while following m_next. Returns the count
3548 m_freem_list(struct mbuf *m)
3550 struct mbuf *nextpkt;
3569 struct mbuf *next = m->m_next;
3574 panic("m_free: freeing an already freed mbuf");
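
The walk at line 3548 is two nested loops: the outer follows m_nextpkt from
packet to packet, the inner follows m_next through one packet's buffers, and
the m_nextpkt link is saved before the inner loop consumes the chain. A
simplified, self-contained model (link fields only) that counts mbufs the way
the function's return value does:

    #include <stddef.h>

    struct mbuf {                  /* simplified model: links only */
        struct mbuf *m_next;       /* next buffer in this packet */
        struct mbuf *m_nextpkt;    /* next packet in the list */
    };

    /* Visit every mbuf in a list of packet chains; return the count. */
    static int
    walk_list(struct mbuf *m)
    {
        int count = 0;

        while (m != NULL) {
            struct mbuf *nextpkt = m->m_nextpkt;  /* save before freeing */
            while (m != NULL) {
                struct mbuf *next = m->m_next;
                count++;                          /* free would go here */
                m = next;
            }
            m = nextpkt;
        }
        return (count);
    }
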
3707 m_freem(struct mbuf *m)
3719 * of data in an mbuf.
3722 m_leadingspace(struct mbuf *m)
3735 * Compute the amount of space available after the end of data in an mbuf.
3738 m_trailingspace(struct mbuf *m)
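
Both routines are pure pointer arithmetic over the mbuf layout: leading space
is the gap between the start of the data buffer and m_data, trailing space the
gap between m_data + m_len and the buffer's end. A simplified model in which
one flat buffer stands in for the real mbuf/cluster data areas:

    struct mbuf_model {          /* simplified: one flat buffer per mbuf */
        char  buf[256];
        char *m_data;            /* start of valid data */
        int   m_len;             /* length of valid data */
    };

    /* Room available before the data (for prepending headers). */
    static int
    leadingspace(const struct mbuf_model *m)
    {
        return (int)(m->m_data - m->buf);
    }

    /* Room available after the data (for appending). */
    static int
    trailingspace(const struct mbuf_model *m)
    {
        return (int)((m->buf + sizeof (m->buf)) - (m->m_data + m->m_len));
    }
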
3750 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain,
3753 struct mbuf *
3754 m_prepend(struct mbuf *m, int len, int how)
3756 struct mbuf *mn;
3776 * Replacement for old M_PREPEND macro: allocate new mbuf to prepend to
3779 struct mbuf *
3780 m_prepend_2(struct mbuf *m, int len, int how)
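
The caller-side pattern for both prepend routines is to reassign the chain
pointer and treat NULL as "chain gone". A hedged sketch; prepend_header and
hdr_len are hypothetical, and it assumes the usual BSD M_PREPEND convention
that the chain is freed when the allocation fails:

    /* Sketch: make room for hdr_len bytes of header at the front of *mp. */
    static int
    prepend_header(struct mbuf **mp, int hdr_len)
    {
        *mp = m_prepend_2(*mp, hdr_len, M_DONTWAIT);
        if (*mp == NULL)
            return (ENOBUFS);  /* assumed: chain was freed on failure */
        /* (*mp)->m_data now points at hdr_len bytes of header room. */
        return (0);
    }
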
3794 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
3795 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
3800 struct mbuf *
3801 m_copym(struct mbuf *m, int off0, int len, int wait)
3803 struct mbuf *n, *mhdr = NULL, **np;
3805 struct mbuf *top;
3818 panic("m_copym: invalid mbuf chain");
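
A common use of m_copym() is cloning a chain read-only, e.g. for a retransmit
or tap path, with M_COPYALL (line 3795) selecting everything to the end. A
hedged sketch against the signature at line 3801; clone_chain is hypothetical:

    /* Sketch: copy the whole chain. Cluster-backed data is shared by
     * reference rather than copied, so the original must not be modified
     * while the clone is alive. */
    static struct mbuf *
    clone_chain(struct mbuf *m)
    {
        return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));  /* NULL on failure */
    }
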
3886 * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
3887 * within this routine. Also, the last mbuf and offset accessed are passed
3888 * out and can be passed back in to avoid having to rescan the entire mbuf
3891 struct mbuf *
3892 m_copym_with_hdrs(struct mbuf *m, int off0, int len0, int wait,
3893 struct mbuf **m_last, int *m_off)
3895 struct mbuf *n, **np = NULL;
3897 struct mbuf *top = NULL;
3941 n = (struct mbuf *)list;
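
The (m_last, m_off) pair is what avoids the rescan: a caller slicing one long
chain into consecutive windows (e.g. segmenting a socket send buffer) feeds the
values from one call into the next, so each copy resumes where the previous one
stopped instead of walking from the head. A hedged sketch against the signature
at lines 3892-3893; the caller and its emit callback are hypothetical:

    /* Sketch: copy total bytes out of chain m in win-sized pieces. */
    static void
    copy_in_windows(struct mbuf *m, int total, int win,
        int (*emit)(struct mbuf *))
    {
        struct mbuf *last = NULL;  /* last mbuf reached by the prior copy */
        int lastoff = 0;           /* offset within that mbuf */
        int off;

        for (off = 0; off < total; off += win) {
            int len = (total - off < win) ? (total - off) : win;
            struct mbuf *n = m_copym_with_hdrs(m, off, len,
                M_DONTWAIT, &last, &lastoff);
            if (n == NULL || emit(n) != 0)
                break;  /* allocation failed or consumer said stop */
        }
    }
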
4015 * Copy data from an mbuf chain starting "off" bytes from the beginning,
4019 m_copydata(struct mbuf *m, int off, int len, caddr_t cp)
4028 panic("m_copydata: invalid mbuf chain");
4036 panic("m_copydata: invalid mbuf chain");
4047 * Concatenate mbuf chain n to m. Both chains must be of the same type
4051 m_cat(struct mbuf *m, struct mbuf *n)
4071 m_adj(struct mbuf *mp, int req_len)
4074 struct mbuf *m;
4099 * Trim from tail. Scan the mbuf chain,
4100 * calculating its length and finding the last mbuf.
4101 * If the adjustment only affects this mbuf, then just
4109 if (m->m_next == (struct mbuf *)0)
4125 * Find the mbuf with last data, adjust its length,
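
By the usual BSD convention the sign of req_len picks the end being trimmed:
positive trims from the head, negative from the tail (the tail case is the scan
described at lines 4099-4101). A short sketch; the 14- and 4-byte sizes are
illustrative:

    m_adj(m, 14);   /* positive: drop a 14-byte link header from the front */
    m_adj(m, -4);   /* negative: drop a 4-byte trailer (e.g. FCS) from the back */
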
4144 * Rearrange an mbuf chain so that len bytes are contiguous
4145 * and in the data area of an mbuf (so that mtod and dtom
4147 * mbuf chain on success, frees it and returns null on failure.
4153 struct mbuf *
4154 m_pullup(struct mbuf *n, int len)
4156 struct mbuf *m;
4161 * If first mbuf has no cluster, and has room for len bytes
4163 * otherwise allocate a new mbuf to prepend to the chain.
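
Because failure frees the chain (line 4147), the canonical caller reassigns the
chain pointer and bails out on NULL before dereferencing with mtod(). A hedged
fragment; struct ip and the bare return are illustrative of the usual
protocol-input idiom:

    struct ip *ip;

    if (m->m_len < (int)sizeof (struct ip) &&
        (m = m_pullup(m, sizeof (struct ip))) == NULL)
        return;                 /* m_pullup already freed the chain */
    ip = mtod(m, struct ip *);  /* safe: header bytes are now contiguous */
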
4211 * Partition an mbuf chain into two pieces, returning the tail --
4215 struct mbuf *
4216 m_split(struct mbuf *m0, int len0, int wait)
4218 struct mbuf *m, *n;
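
Typical use of m_split() is cutting a header away from its payload at a known
byte boundary: the original chain keeps the first len0 bytes and the returned
pointer is the remainder. A hedged sketch against the signature at line 4216;
detach_payload and hdr_len are hypothetical:

    /* Sketch: split chain m after hdr_len bytes; returns the tail. */
    static struct mbuf *
    detach_payload(struct mbuf *m, int hdr_len)
    {
        return (m_split(m, hdr_len, M_DONTWAIT));  /* NULL if it fails */
    }
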
4275 struct mbuf *
4279 struct mbuf *m;
4280 struct mbuf *top = NULL, **mp = &top;
4325 * Place initial small packet/header at end of mbuf.
4369 /* Bail if we've maxed out the mbuf memory map */
4441 * Copy data from a buffer back into the indicated mbuf chain,
4442 * starting "off" bytes from the beginning, extending the mbuf
4446 m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
4449 struct mbuf *m = m0, *n;
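
m_copyback() is the write-direction twin of m_copydata() (line 4019): it
scatters len flat bytes into the chain starting at off, growing the chain if
the data runs past its end. A hedged fragment; the offset and field value are
illustrative:

    /* Sketch: overwrite four bytes at offset 8 of the packet. */
    u_int32_t field = htonl(42);

    m_copyback(m, 8, sizeof (field), (caddr_t)&field);
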
4505 * Dup the mbuf chain passed in. The whole thing. No cute additional cruft.
4513 struct mbuf *
4514 m_dup(struct mbuf *m, int how)
4516 struct mbuf *n, **np;
4517 struct mbuf *top;
4526 * Quick check: if we have one mbuf and its data fits in an
4527 * mbuf with packet header, just copy and go.
4530 /* Then just move the data into an mbuf and be done... */
4610 static struct mbuf *
4611 m_expand(struct mbuf *m, struct mbuf **last)
4613 struct mbuf *top = NULL;
4614 struct mbuf **nm = &top;
4625 struct mbuf *n;
4665 struct mbuf *
4666 m_normalize(struct mbuf *m)
4668 struct mbuf *top = NULL;
4669 struct mbuf **nm = &top;
4673 struct mbuf *n;
4680 struct mbuf *last;
4702 m_mchtype(struct mbuf *m, int t)
4710 m_mtod(struct mbuf *m)
4715 struct mbuf *
4718 return ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)));
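
The mask at line 4718 works because mbufs are carved out of memory at
MSIZE-aligned addresses, so clearing the low bits of any pointer into an mbuf
yields the mbuf's base. A self-contained illustration; the MSIZE value is
assumed (the real one comes from the mbuf headers):

    #include <assert.h>
    #include <stdint.h>

    #define MSIZE 256  /* assumed; must be a power of two for the mask */

    int
    main(void)
    {
        /* The allocator guarantees MSIZE alignment; model it with _Alignas. */
        _Alignas(MSIZE) unsigned char mbuf[MSIZE];
        void *interior = &mbuf[100];   /* any pointer into the mbuf */
        void *base = (void *)((uintptr_t)interior & ~(uintptr_t)(MSIZE - 1));

        assert(base == (void *)mbuf);  /* recovers the mbuf start */
        return 0;
    }
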
4722 m_mcheck(struct mbuf *m)
4878 * Because we can run out of memory before filling the mbuf
4924 * hole in the kernel sub-map for the mbuf pool.
5138 * Given an address of a buffer (mbuf/cluster/big cluster), return
5152 * For the mbuf case, find the index of the cluster
5153 * used by the mbuf and use that index to locate the
5155 * mbuf index relative to the cluster base and use
5183 struct mbuf *m = addr;
5192 /* Save constructed mbuf fields */
5202 /* Restore constructed mbuf fields */
5207 mcl_audit_restore_mbuf(struct mbuf *m, mcache_audit_t *mca, boolean_t composite)
5209 struct mbuf *ms = (struct mbuf *)mca->mca_contents;
5212 struct mbuf *next = m->m_next;
5216 * We could have hand-picked the mbuf fields and restored
5219 * the mbuf layer will recheck and reinitialize anyway.
5225 * For a regular mbuf (no cluster attached) there's nothing
5235 mcl_audit_save_mbuf(struct mbuf *m, mcache_audit_t *mca)
5261 mcl_audit_mcheck_panic(struct mbuf *m)
5268 panic("mcl_audit: freed mbuf %p with type 0x%x (instead of 0x%x)\n%s\n",