33c33
< __FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 149648 2005-08-30 21:31:42Z andre $");
---
> __FBSDID("$FreeBSD: head/sys/kern/uipc_mbuf.c 151976 2005-11-02 16:20:36Z andre $");
89,93d88
<  * Malloc-type for external ext_buf ref counts.
<  */
< static MALLOC_DEFINE(M_MBUF, "mbextcnt", "mbuf external ref counts");
<
< /*
195c190
<         u_int *ref_cnt = NULL;
---
>         KASSERT(type != EXT_CLUSTER, ("%s: EXT_CLUSTER not allowed", __func__));
197,204c192,193
<         /* XXX Shouldn't be adding EXT_CLUSTER with this API */
<         if (type == EXT_CLUSTER)
<                 ref_cnt = (u_int *)uma_find_refcnt(zone_clust,
<                     mb->m_ext.ext_buf);
<         else if (type == EXT_EXTREF)
<                 ref_cnt = __DEVOLATILE(u_int *, mb->m_ext.ref_cnt);
<         mb->m_ext.ref_cnt = (ref_cnt == NULL) ?
<             malloc(sizeof(u_int), M_MBUF, M_NOWAIT) : (u_int *)ref_cnt;
---
>         if (type != EXT_EXTREF)
>                 mb->m_ext.ref_cnt = (u_int *)uma_zalloc(zone_ext_refcnt, M_NOWAIT);
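
The old code lazily allocated the external reference counter with malloc(9), special-casing EXT_CLUSTER via uma_find_refcnt(); the new code always draws the counter from the dedicated zone_ext_refcnt UMA zone unless the caller manages its own counter (EXT_EXTREF). Below is a minimal sketch, not part of this commit, of a driver attaching its own buffer through MEXTADD() after this change; my_free(), my_attach(), my_buf and my_size are made-up names:

        /* Kernel code: assumes <sys/param.h>, <sys/systm.h>, <sys/mbuf.h>. */
        static void
        my_free(void *buf, void *args)
        {
                /* Return the buffer to driver-private storage here. */
        }

        static struct mbuf *
        my_attach(char *my_buf, u_int my_size)
        {
                struct mbuf *m;

                MGETHDR(m, M_DONTWAIT, MT_DATA);
                if (m == NULL)
                        return (NULL);
                /* The ref counter now comes from zone_ext_refcnt, not malloc(9). */
                MEXTADD(m, my_buf, my_size, my_free, NULL, 0, EXT_NET_DRV);
                if ((m->m_flags & M_EXT) == 0) {
                        /* Counter allocation failed; M_EXT was never set. */
                        m_free(m);
                        return (NULL);
                }
                return (m);
        }
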
219c208
<  * storage attached to them if the reference count hits 0.
---
>  * storage attached to them if the reference count hits 1.
224,225c213,214
<         u_int cnt;
<         int dofree;
---
>         KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
>         KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
227,251c216,220
<         /* Account for lazy ref count assign. */
<         if (m->m_ext.ref_cnt == NULL)
<                 dofree = 1;
<         else
<                 dofree = 0;
<
<         /*
<          * This is tricky. We need to make sure to decrement the
<          * refcount in a safe way but to also clean up if we're the
<          * last reference. This method seems to do it without race.
<          */
<         while (dofree == 0) {
<                 cnt = *(m->m_ext.ref_cnt);
<                 if (atomic_cmpset_int(m->m_ext.ref_cnt, cnt, cnt - 1)) {
<                         if (cnt == 1)
<                                 dofree = 1;
<                         break;
<                 }
<         }
<
<         if (dofree) {
<                 /*
<                  * Do the free, should be safe.
<                  */
<                 if (m->m_ext.ext_type == EXT_PACKET) {
---
>         /* Free attached storage if this mbuf is the only reference to it. */
>         if (*(m->m_ext.ref_cnt) == 1 ||
>             atomic_fetchadd_int(m->m_ext.ref_cnt, -1) == 1) {
>                 switch (m->m_ext.ext_type) {
>                 case EXT_CLUSTER:
253,257c222,240
<                         return;
<                 } else if (m->m_ext.ext_type == EXT_CLUSTER) {
<                         uma_zfree(zone_clust, m->m_ext.ext_buf);
<                         m->m_ext.ext_buf = NULL;
<                 } else {
---
>                         return;         /* Job done. */
>                         break;
>                 case EXT_JUMBO9:
>                         uma_zfree(zone_jumbo9, m->m_ext.ext_buf);
>                         break;
>                 case EXT_JUMBO16:
>                         uma_zfree(zone_jumbo16, m->m_ext.ext_buf);
>                         break;
>                 case EXT_SFBUF:
>                 case EXT_NET_DRV:
>                 case EXT_MOD_TYPE:
>                 case EXT_DISPOSABLE:
>                         *(m->m_ext.ref_cnt) = 0;
>                         uma_zfree(zone_ext_refcnt, __DEVOLATILE(u_int *,
>                             m->m_ext.ref_cnt));
>                         /* FALLTHROUGH */
>                 case EXT_EXTREF:
>                         KASSERT(m->m_ext.ext_free != NULL,
>                             ("%s: ext_free not set", __func__));
260,266c243,246
<                         if (m->m_ext.ext_type != EXT_EXTREF) {
<                                 if (m->m_ext.ref_cnt != NULL)
<                                         free(__DEVOLATILE(u_int *,
<                                             m->m_ext.ref_cnt), M_MBUF);
<                                 m->m_ext.ref_cnt = NULL;
<                         }
<                         m->m_ext.ext_buf = NULL;
---
>                         break;
>                 default:
>                         KASSERT(m->m_ext.ext_type == 0,
>                             ("%s: unknown ext_type", __func__));
268a249,259
>         /*
>          * Free this mbuf back to the mbuf zone with all m_ext
>          * information purged.
>          */
>         m->m_ext.ext_buf = NULL;
>         m->m_ext.ext_free = NULL;
>         m->m_ext.ext_args = NULL;
>         m->m_ext.ref_cnt = NULL;
>         m->m_ext.ext_size = 0;
>         m->m_ext.ext_type = 0;
>         m->m_flags &= ~M_EXT;
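
The CAS retry loop is gone. atomic_fetchadd_int(9) returns the counter value before the decrement, so a result of 1 identifies the last reference; the *(m->m_ext.ref_cnt) == 1 test in front of it skips the atomic read-modify-write entirely when this mbuf holds the sole reference, which is safe because no other thread can reach the counter in that case. A minimal userland sketch of the same test, using C11 atomics as a stand-in for the kernel primitive:

        #include <stdatomic.h>
        #include <stdbool.h>

        /* Illustrative stand-in for the last-reference test in mb_free_ext(). */
        static bool
        last_ref(atomic_uint *ref_cnt)
        {
                /* Sole owner: nobody can race us, so skip the atomic op. */
                if (atomic_load_explicit(ref_cnt, memory_order_relaxed) == 1)
                        return (true);
                /* atomic_fetch_sub() returns the value before the decrement. */
                return (atomic_fetch_sub(ref_cnt, 1) == 1);
        }
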
272a264,287
>  * Attach the cluster from *m to *n, set up m_ext in *n
>  * and bump the refcount of the cluster.
>  */
> static void
> mb_dupcl(struct mbuf *n, struct mbuf *m)
> {
>         KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
>         KASSERT(m->m_ext.ref_cnt != NULL, ("%s: ref_cnt not set", __func__));
>         KASSERT((n->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
>
>         if (*(m->m_ext.ref_cnt) == 1)
>                 *(m->m_ext.ref_cnt) += 1;
>         else
>                 atomic_add_int(m->m_ext.ref_cnt, 1);
>         n->m_ext.ext_buf = m->m_ext.ext_buf;
>         n->m_ext.ext_free = m->m_ext.ext_free;
>         n->m_ext.ext_args = m->m_ext.ext_args;
>         n->m_ext.ext_size = m->m_ext.ext_size;
>         n->m_ext.ref_cnt = m->m_ext.ref_cnt;
>         n->m_ext.ext_type = m->m_ext.ext_type;
>         n->m_flags |= M_EXT;
> }
>
> /*
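
The new mb_dupcl() centralizes the m_ext duplication that m_copym(), m_copypacket() and m_split() previously open-coded (see the hunks below), and it bumps the counter with the same sole-owner shortcut used in mb_free_ext(). Since the function is static to uipc_mbuf.c, the following usage sketch is illustrative only; outside callers share clusters through m_copym() and friends:

        /* Illustrative only: share one cluster between two mbufs. */
        static void
        share_cluster_example(void)
        {
                struct mbuf *m, *n;

                m = m_getcl(M_DONTWAIT, MT_DATA, 0);    /* cluster, *ref_cnt == 1 */
                n = m_get(M_DONTWAIT, MT_DATA);
                if (m == NULL || n == NULL) {
                        m_freem(m);     /* m_freem(NULL) is a no-op */
                        m_freem(n);
                        return;
                }
                mb_dupcl(n, m);         /* both reference the cluster, *ref_cnt == 2 */
                m_freem(m);             /* cluster survives, *ref_cnt drops to 1 */
                m_freem(n);             /* last reference: storage is freed */
        }
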
536,539c551
<                         n->m_ext = m->m_ext;
<                         n->m_flags |= M_EXT;
<                         MEXT_ADD_REF(m);
<                         n->m_ext.ref_cnt = m->m_ext.ref_cnt;
---
>                         mb_dupcl(n, m);
581c593
<         struct mbuf *mm, *x, *z;
---
>         struct mbuf *mm, *x, *z, *prev = NULL;
583c595
<         int i, mlen, nlen = 0;
---
>         int i, nlen = 0;
591,606c603,607
<         /* Make sure environment is sane. */
<         for (z = m; z != NULL; z = z->m_next) {
<                 mlen += z->m_len;
<                 if (!M_WRITABLE(z)) {
<                         /* Make clusters writeable. */
<                         if (z->m_flags & M_RDONLY)
<                                 return NULL;    /* Can't handle ext ref. */
<                         x = m_getcl(how, MT_DATA, 0);
<                         if (!x)
<                                 return NULL;
<                         bcopy(z->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
<                         p = x->m_ext.ext_buf + (z->m_data - z->m_ext.ext_buf);
<                         MEXT_REM_REF(z); /* XXX */
<                         z->m_data = p;
<                         x->m_flags &= ~M_EXT;
<                         (void)m_free(x);
---
>         mm = m;
>         if (!prep) {
>                 while (mm->m_next) {
>                         prev = mm;
>                         mm = mm->m_next;
609d609
<         mm = prep ? m : z;
616a617,631
>         if (!M_WRITABLE(mm)) {
>                 /* XXX: Use proper m_xxx function instead. */
>                 x = m_getcl(how, MT_DATA, mm->m_flags);
>                 if (x == NULL)
>                         return NULL;
>                 bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
>                 p = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
>                 x->m_data = p;
>                 mm->m_next = NULL;
>                 if (mm != m)
>                         prev->m_next = x;
>                 m_free(mm);
>                 mm = x;
>         }
>
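
Instead of walking the whole chain and copying every non-writable cluster up front, the function now locates the one mbuf it will actually modify (the last mbuf when appending, the first when prepending) and, only if M_WRITABLE() fails on it, copies that cluster's contents into a freshly allocated one and splices the copy into the chain. A condensed sketch of the copy-and-splice step, assuming mm carries a cluster (M_EXT); make_writable() is a made-up name and pkthdr handling is omitted:

        /* Kernel code: assumes <sys/param.h>, <sys/systm.h>, <sys/mbuf.h>. */
        static struct mbuf *
        make_writable(struct mbuf *mm, int how)
        {
                struct mbuf *x;

                if (M_WRITABLE(mm))
                        return (mm);    /* Sole reference, nothing to do. */
                x = m_getcl(how, MT_DATA, mm->m_flags);
                if (x == NULL)
                        return (NULL);
                /* Duplicate the shared cluster's contents into the private one. */
                bcopy(mm->m_ext.ext_buf, x->m_ext.ext_buf, x->m_ext.ext_size);
                x->m_data = x->m_ext.ext_buf + (mm->m_data - mm->m_ext.ext_buf);
                x->m_len = mm->m_len;
                x->m_next = mm->m_next;
                mm->m_next = NULL;
                m_free(mm);             /* Drop our reference on the old cluster. */
                return (x);
        }
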
729,732c744
<                 n->m_ext = m->m_ext;
<                 n->m_flags |= M_EXT;
<                 MEXT_ADD_REF(m);
<                 n->m_ext.ref_cnt = m->m_ext.ref_cnt;
---
>                 mb_dupcl(n, m);
750,753c762
<                         n->m_ext = m->m_ext;
<                         n->m_flags |= M_EXT;
<                         MEXT_ADD_REF(m);
<                         n->m_ext.ref_cnt = m->m_ext.ref_cnt;
---
>                         mb_dupcl(n, m);
1136,1139d1144
<                 n->m_flags |= M_EXT;
<                 n->m_ext = m->m_ext;
<                 MEXT_ADD_REF(m);
<                 n->m_ext.ref_cnt = m->m_ext.ref_cnt;
1140a1146
>                 mb_dupcl(n, m);
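
The remaining hunks are mechanical: every place that duplicated m_ext by hand (struct assignment, setting M_EXT, MEXT_ADD_REF(), plus a redundant ref_cnt copy) now reduces to a single call, schematically:

        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data + off;    /* position into the cluster */
                mb_dupcl(n, m);                 /* one call replaces four lines */
        }
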