• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /macosx-10.10.1/xnu-2782.1.97/bsd/kern/

Lines Matching +defs:from +defs:scratch +defs:flag

46  *    may be used to endorse or promote products derived from this software
271 * the CPU layer cache can be disabled by setting the MCF_NOCPUCACHE flag,
273 * detection may also be disabled by setting the MCF_NOLEAKLOG flag, e.g.
299 * remaining entries unused. For 16KB cluster, only one entry from the first
365 * gets removed (SLF_DETACHED) from the class's slab list. A chunk that is
372 * well as to convert or move a slab from one class to another; e.g. the
402 * whenever a new piece of memory mapped in from the VM crosses the 1MB
436 * Size of data from the beginning of an mbuf that covers m_hdr,
554 /* Lock to protect mleak tables from concurrent modification */
745 #define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */
751 * This flag is set for all mbufs that come out of and into the composite
753 * are marked with such a flag have clusters attached to them, and will be
758 * such a time, this flag will be cleared from the mbufs and the objects
764 * This flag indicates that the external cluster is read-only, i.e. it is
765 * or was referred to by more than one mbuf. Once set, this flag is never
851 #define MEXT_INIT(m, buf, size, free, arg, rfa, ref, flag) { \
861 MEXT_FLAGS(m) = (flag); \
864 #define MBUF_CL_INIT(m, buf, rfa, ref, flag) \
865 MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, ref, flag)
867 #define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \
868 MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, ref, flag)
870 #define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \
871 MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, ref, flag)
874 * Macro to convert BSD malloc sleep flag to mcache's
1155 * we don't clear the flag when the refcount goes back to 1
1435 /* Module specific scratch space (32-bit alignment requirement) */
1549 /* populate the initial sizes and report from there on */
1652 * Obtain a slab of object(s) from the class's freelist.
1669 * a slab from the reverse direction, in the hope that this could
1781 /* If empty, remove this slab from the class's freelist */
1922 /* Remove the slab from the mbuf class's slab list */
1960 /* Remove the slab from the 2KB cluster class's slab list */
1987 * bucket layer. It returns one or more elements from the appropriate global
2107 * after the objects are obtained from either the bucket or slab layer
2178 * Obtain object(s) from the composite class's freelist.
2193 /* Get what we can from the freelist */
2310 * and clear EXTF_COMPOSITE flag from the mbuf, as we are
2370 * the bucket layer. It returns one or more composite elements from the
2372 * to obtain the rudimentary objects from their caches and construct them
2415 * allocate from the appropriate rudimentary caches and use
2421 * Mark these allocation requests as coming from a composite cache.
2596 * after the objects are obtained from either the bucket or slab layer
3231 * from the freelist of other mbuf classes. Only
3258 * Request mcache to reap extra elements from all of its caches;
3347 /* Check for scratch area overflow */
3488 * Perform `fast' allocation mbuf clusters from a cache of recently-freed
3504 u_int32_t flag;
3515 flag = MEXT_FLAGS(m);
3518 MBUF_CL_INIT(m, cl, rfa, 1, flag);
3658 * "Move" mbuf pkthdr from "from" to "to".
3659 * "from" must have M_PKTHDR set, and "to" must be empty.
3662 m_copy_pkthdr(struct mbuf *to, struct mbuf *from)
3664 VERIFY(from->m_flags & M_PKTHDR);
3666 /* Check for scratch area overflow */
3667 m_redzone_verify(from);
3670 /* Check for scratch area overflow */
3675 to->m_pkthdr = from->m_pkthdr; /* especially tags */
3676 m_classifier_init(from, 0); /* purge classifier info */
3677 m_tag_init(from, 1); /* purge all tags from src */
3678 m_scratch_init(from); /* clear src scratch area */
3679 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
3686 * Duplicate "from"'s mbuf pkthdr in "to".
3687 * "from" must have M_PKTHDR set, and "to" must be empty.
3691 m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how)
3693 VERIFY(from->m_flags & M_PKTHDR);
3695 /* Check for scratch area overflow */
3696 m_redzone_verify(from);
3699 /* Check for scratch area overflow */
3704 to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT);
3707 to->m_pkthdr = from->m_pkthdr;
3710 return (m_tag_copy_chain(to, from, how));
3714 m_copy_pftag(struct mbuf *to, struct mbuf *from)
3716 to->m_pkthdr.pf_mtag = from->m_pkthdr.pf_mtag;
3743 m_copy_classifier(struct mbuf *to, struct mbuf *from)
3746 VERIFY(from->m_flags & M_PKTHDR);
3748 to->m_pkthdr.pkt_proto = from->m_pkthdr.pkt_proto;
3749 to->m_pkthdr.pkt_flowsrc = from->m_pkthdr.pkt_flowsrc;
3750 to->m_pkthdr.pkt_flowid = from->m_pkthdr.pkt_flowid;
3751 to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags;
3752 (void) m_set_service_class(to, from->m_pkthdr.pkt_svc);
3753 to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo;
3755 to->m_pkthdr.pkt_bwseq = from->m_pkthdr.pkt_bwseq;
3776 u_int32_t flag;
3804 /* Allocate the composite mbuf + cluster elements from the cache */
3824 flag = MEXT_FLAGS(m);
3828 MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
3830 MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
3832 MBUF_CL_INIT(m, cl, rfa, 1, flag);
3969 /* Allocate the elements in one shot from the mbuf cache */
4034 * Every composite mbuf + cluster element comes from the intermediate
4036 * the last composite element will come from the MC_MBUF_CL cache,
4120 u_int32_t flag;
4145 flag = MEXT_FLAGS(m);
4152 MBUF_16KCL_INIT(m, cl, rfa, 1, flag);
4154 MBUF_BIGCL_INIT(m, cl, rfa, 1, flag);
4156 MBUF_CL_INIT(m, cl, rfa, 1, flag);
4307 /* Check for scratch area overflow */
4526 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
4528 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
4592 * There is data to copy from the packet header mbuf
4691 * MCR_TRYHARD so that we may reclaim buffers from other places
4790 * Copy data from an mbuf chain starting "off" bytes from the beginning,
4838 /* splat the data from one into the other */
4857 * Trim from head.
4875 * Trim from tail. Scan the mbuf chain,
4902 * and toss data from remaining mbufs on chain.
5105 * Routine to copy from device local memory into mbufs.
5323 * Copy data from a buffer back into the indicated mbuf chain,
5324 * starting "off" bytes from the beginning, extending the mbuf
5362 * no way to recover from partial success.
5536 * copying data from old mbufs if requested.
6126 /* Nothing? Then try hard to get it from somewhere */
7204 /* Check for scratch area overflow */
7379 static unsigned char scratch[32];
7385 bzero(scratch, sizeof(scratch));
7386 scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch);
7390 * Panic if a driver wrote to our scratch memory.
7392 for (k = 0; k < sizeof(scratch); k++)
7393 if (scratch[k])
7443 * Move the objects from the composite class freelist to