sys/kern/uipc_mbuf.c: diff between revisions 149648 (old) and 151976 (new); the old revision is shown below.
/*-
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 16 unchanged lines hidden ---

 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 149648 2005-08-30 21:31:42Z andre $");

#include "opt_mac.h"
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

--- 39 unchanged lines hidden ---

    &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
    &m_defragfailure, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
    &m_defragrandomfailures, 0, "");
#endif

/*
 * Malloc-type for external ext_buf ref counts.
 */
static MALLOC_DEFINE(M_MBUF, "mbextcnt", "mbuf external ref counts");

/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one but still return the top of the newly allocated
 * chain.
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, short type)

--- 85 unchanged lines hidden ---
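
/*
 * Illustrative sketch (not part of the original file): one way a
 * caller might use m_getm() to grow a chain before copying in more
 * payload.  The function name and error handling are hypothetical.
 */
static int
example_grow_chain(struct mbuf *m, int payload_len)
{

	/* Append enough mbufs and/or clusters for payload_len bytes. */
	if (m_getm(m, payload_len, M_DONTWAIT, MT_DATA) == NULL)
		return (ENOBUFS);	/* m_getm returns NULL on failure */
	return (0);
}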

 *
 * Returns:
 *	Nothing.
 */
void
m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
    void (*freef)(void *, void *), void *args, int flags, int type)
{
	u_int *ref_cnt = NULL;

	/* XXX Shouldn't be adding EXT_CLUSTER with this API */
	if (type == EXT_CLUSTER)
		ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
		    mb->m_ext.ext_buf);
	else if (type == EXT_EXTREF)
		ref_cnt = __DEVOLATILE(u_int *, mb->m_ext.ref_cnt);
	mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
	    malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
	if (mb->m_ext.ref_cnt != NULL) {
		*(mb->m_ext.ref_cnt) = 1;
		mb->m_flags |= (M_EXT | flags);
		mb->m_ext.ext_buf = buf;
		mb->m_data = mb->m_ext.ext_buf;
		mb->m_ext.ext_size = size;
		mb->m_ext.ext_free = freef;
		mb->m_ext.ext_args = args;
		mb->m_ext.ext_type = type;
	}
}
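
/*
 * Illustrative sketch (not part of the original file): attaching a
 * module-private buffer to an mbuf with m_extadd().  The free routine
 * and function names are invented for the example; EXT_MOD_TYPE is
 * the generic type for module-provided ext_bufs.  Note that m_extadd()
 * signals ref count allocation failure by leaving M_EXT unset.
 */
static void
example_ext_free(void *buf, void *args)
{

	/* Return the buffer to its (hypothetical) backing allocator. */
	free(buf, M_DEVBUF);
}

static struct mbuf *
example_attach_buf(caddr_t buf, u_int size, int how)
{
	struct mbuf *mb;

	MGET(mb, how, MT_DATA);
	if (mb == NULL)
		return (NULL);
	m_extadd(mb, buf, size, example_ext_free, NULL, 0, EXT_MOD_TYPE);
	if ((mb->m_flags & M_EXT) == 0) {
		/* Ref count allocation failed; M_EXT was never set. */
		m_freem(mb);
		return (NULL);
	}
	return (mb);
}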

/*
 * Clean up after mbufs with M_EXT storage attached to them once the
 * reference count hits 0.  Not directly exported.
 */
void
mb_free_ext(struct mbuf *m)
{
	u_int cnt;
	int dofree;

	/* Account for the lazy ref count assignment. */
	if (m->m_ext.ref_cnt == NULL)
		dofree = 1;
	else
		dofree = 0;

	/*
	 * Decrement the reference count with an atomic compare-and-set
	 * loop.  Only the thread whose CAS observes the count at 1,
	 * i.e. the one dropping the last reference, goes on to clean
	 * up, so the storage is freed exactly once and without races.
	 */
	while (dofree == 0) {
		cnt = *(m->m_ext.ref_cnt);
		if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
			if (cnt == 1)
				dofree = 1;
			break;
		}
	}

	if (dofree) {
		/*
		 * We dropped the last reference; free the external
		 * storage according to its type.
		 */
		if (m->m_ext.ext_type == EXT_PACKET) {
			uma_zfree(zone_pack, m);
			return;
		} else if (m->m_ext.ext_type == EXT_CLUSTER) {
			uma_zfree(zone_clust, m->m_ext.ext_buf);
			m->m_ext.ext_buf = NULL;
		} else {
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_args);
			if (m->m_ext.ext_type != EXT_EXTREF) {
				if (m->m_ext.ref_cnt != NULL)
					free(__DEVOLATILE(u_int *,
					    m->m_ext.ref_cnt), M_MBUF);
				m->m_ext.ref_cnt = NULL;
			}
			m->m_ext.ext_buf = NULL;
		}
	}
	uma_zfree(zone_mbuf, m);
}
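
/*
 * Editor's sketch of the decrement idiom above in isolation: a CAS
 * loop retries until the decrement lands, and only the caller whose
 * CAS moved the count from 1 to 0 is told to free.  Illustrative
 * only; mb_free_ext() itself is the authoritative user.
 */
static int
example_refcnt_release(volatile u_int *cnt)
{
	u_int old;

	do {
		old = *cnt;
	} while (atomic_cmpset_int(cnt, old, old - 1) == 0);
	return (old == 1);	/* nonzero: caller dropped the last ref */
}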

/*
 * Strip any tags and packet headers from an mbuf chain.  If "all" is
 * set, the first mbuf in the chain is cleaned as well.
 */
void
m_demote(struct mbuf *m0, int all)
{
	struct mbuf *m;

--- 247 unchanged lines hidden ---

			n->m_pkthdr.len -= off0;
		else
			n->m_pkthdr.len = len;
		copyhdr = 0;
	}
	n->m_len = min(len, m->m_len - off);
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data + off;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	} else
		bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
		    (u_int)n->m_len);
	if (len != M_COPYALL)
		len -= n->m_len;
	off = 0;
	m = m->m_next;
	np = &n->m_next;
--- 25 unchanged lines hidden ---
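
/*
 * Illustrative sketch (not part of the original file): the copy loop
 * above shares M_EXT buffers by bumping the reference count rather
 * than copying their contents, so a full reference copy of a packet
 * is cheap.  The function name is hypothetical.
 */
static struct mbuf *
example_snapshot(struct mbuf *m)
{

	/* Share cluster storage; only mbuf headers are allocated. */
	return (m_copym(m, 0, M_COPYALL, M_DONTWAIT));
}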

	bcopy(s, t, (size_t)len);
	return 0;
}

struct mbuf *
m_copymdata(struct mbuf *m, struct mbuf *n, int off, int len,
    int prep, int how)
{
	struct mbuf *mm, *x, *z;
	caddr_t p;
	int i, mlen = 0, nlen = 0;
	char buf[MLEN];

	KASSERT(m != NULL && n != NULL, ("m_copymdata, no target or source"));
	KASSERT(off >= 0, ("m_copymdata, negative off %d", off));
	KASSERT(len >= 0, ("m_copymdata, negative len %d", len));
	KASSERT(prep == 0 || prep == 1, ("m_copymdata, unknown direction %d", prep));

	/* Make sure environment is sane. */
	for (z = m; z != NULL; z = z->m_next) {
		mlen += z->m_len;
		if (!M_WRITABLE(z)) {
			/* Make clusters writable. */
			if (z->m_flags & M_RDONLY)
				return NULL;	/* Can't handle ext ref. */
			x = m_getcl(how, MT_DATA, 0);
			if (!x)
				return NULL;
			bcopy(z->m_ext.ext_buf, x->m_ext.ext_buf,
			    x->m_ext.ext_size);
			p = x->m_ext.ext_buf + (z->m_data - z->m_ext.ext_buf);
			MEXT_REM_REF(z);	/* XXX */
			z->m_data = p;
			x->m_flags &= ~M_EXT;
			(void)m_free(x);
		}
	}
	/* For appending, mm must point at the last mbuf of the chain. */
	if (prep)
		mm = m;
	else
		for (mm = m; mm->m_next != NULL; mm = mm->m_next)
			;
	for (z = n; z != NULL; z = z->m_next)
		nlen += z->m_len;
	if (len == M_COPYALL)
		len = nlen - off;
	if (off + len > nlen || len < 1)
		return NULL;

	/*
	 * Append/prepend the data, allocating mbufs as necessary.
	 */
	/* Shortcut if there is enough free space in the first/last mbuf. */
	if (!prep && M_TRAILINGSPACE(mm) >= len) {
		m_apply(n, off, len, m_bcopyxxx, mtod(mm, caddr_t) +
		    mm->m_len);
		mm->m_len += len;
--- 96 unchanged lines hidden ---
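
/*
 * Illustrative sketch (not part of the original file): using
 * m_copymdata() to append len bytes taken from offset off of chain n
 * onto writable chain m; prep == 0 selects append, 1 prepend.  The
 * wrapper name is hypothetical.
 */
static struct mbuf *
example_append(struct mbuf *m, struct mbuf *n, int off, int len)
{

	return (m_copymdata(m, n, off, len, 0, M_DONTWAIT));
}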

	if (n == NULL)
		goto nospace;

	if (!m_dup_pkthdr(n, m, how))
		goto nospace;
	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		n->m_data = m->m_data;
		n->m_ext = m->m_ext;
		n->m_flags |= M_EXT;
		MEXT_ADD_REF(m);
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

	m = m->m_next;
	while (m) {
		MGET(o, how, m->m_type);
		if (o == NULL)
			goto nospace;

		n->m_next = o;
		n = n->m_next;

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			n->m_data = m->m_data;
			n->m_ext = m->m_ext;
			n->m_flags |= M_EXT;
			MEXT_ADD_REF(m);
			n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

		m = m->m_next;
	}
	return top;
nospace:

--- 366 unchanged lines hidden ---

	} else {
		MGET(n, wait, m->m_type);
		if (n == NULL)
			return (NULL);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		MEXT_ADD_REF(m);
		n->m_ext.ref_cnt = m->m_ext.ref_cnt;
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = NULL;
	return (n);

--- 518 unchanged lines hidden ---
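
/*
 * Illustrative sketch (not part of the original file): m_split() at a
 * byte offset yields the tail as a new chain while m keeps the head;
 * cluster data is shared via the reference count, as in the
 * "extpacket" case above.  Returns NULL on allocation failure.  The
 * wrapper name is hypothetical.
 */
static struct mbuf *
example_split_tail(struct mbuf *m, int hdr_len)
{

	return (m_split(m, hdr_len, M_DONTWAIT));
}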