Deleted Added
sdiff udiff text old ( 74914 ) new ( 75105 )
full compact
1/*
2 * Copyright (c) 1982, 1986, 1988, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 17 unchanged lines hidden (view full) ---

26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
34 * $FreeBSD: head/sys/sys/mbuf.h 74914 2001-03-28 09:17:56Z jhb $
35 */
36
37#ifndef _SYS_MBUF_H_
38#define _SYS_MBUF_H_
39
40#include <sys/lock.h>
41#include <sys/mutex.h> /* XXX */
42
43/*
44 * Mbufs are of a single size, MSIZE (machine/param.h), which
45 * includes overhead. An mbuf may add a single "mbuf cluster" of size
46 * MCLBYTES (also in machine/param.h), which has no additional overhead
47 * and is used instead of the internal data area; this is done when
48 * at least MINCLSIZE of data must be stored.
49 */

--- 199 unchanged lines hidden (view full) ---

249 u_int refcnt;
250};
251
252/*
253 * free list header definitions: mbffree_lst, mclfree_lst, mcntfree_lst
254 */
/*
 * Free mbuf list header: head of a singly-linked list of free mbufs,
 * protected by its own mutex.  (One instance: mmbfree.)
 */
struct mbffree_lst {
	struct mbuf *m_head;		/* first free mbuf, NULL if empty */
	struct mtx m_mtx;		/* protects m_head and list stats */
};
259
/*
 * Free mbuf cluster list header.  (One instance: mclfree.)
 */
struct mclfree_lst {
	union mcluster *m_head;		/* first free cluster, NULL if empty */
	struct mtx m_mtx;		/* protects m_head and list stats */
};
264
/*
 * Free list header for external-storage reference counters.
 * (One instance: mcntfree.)
 */
struct mcntfree_lst {
	union mext_refcnt *m_head;	/* first free ref. counter, or NULL */
	struct mtx m_mtx;		/* protects m_head and mbstat.m_refree */
};
269
270/*
271 * Wake up the next instance (if any) of a sleeping allocation - which is
272 * waiting for a {cluster, mbuf} to be freed.
273 *
274 * Must be called with the appropriate mutex held.
275 */

--- 20 unchanged lines hidden (view full) ---

296 atomic_subtract_int(&((m)->m_ext.ref_cnt->refcnt), 1); \
297} while(0)
298
/* MEXT_ADD_REF(m): atomically take one more reference on m's ext. storage. */
#define MEXT_ADD_REF(m)	atomic_add_int(&((m)->m_ext.ref_cnt->refcnt), 1)
300
/*
 * _MEXT_ALLOC_CNT(m_cnt, how): pop one external-storage reference counter
 * off the mcntfree list, refilling the list via m_alloc_ref() if it is
 * empty.  On success the counter is zeroed and stored in (m_cnt); if the
 * list cannot be replenished, (m_cnt) is set to NULL.  (how) is the
 * allocation wait flag passed through to m_alloc_ref().
 */
#define _MEXT_ALLOC_CNT(m_cnt, how) do {				\
	union mext_refcnt *__mcnt;					\
									\
	mtx_lock(&mcntfree.m_mtx);					\
	if (mcntfree.m_head == NULL)					\
		m_alloc_ref(1, (how));	/* try to refill the free list */ \
	__mcnt = mcntfree.m_head;	/* may still be NULL on failure */ \
	if (__mcnt != NULL) {						\
		mcntfree.m_head = __mcnt->next_ref;			\
		mbstat.m_refree--;	/* one fewer free counter */	\
		__mcnt->refcnt = 0;	/* hand out with zero refs */	\
	}								\
	mtx_unlock(&mcntfree.m_mtx);					\
	(m_cnt) = __mcnt;						\
} while (0)
316
/*
 * _MEXT_DEALLOC_CNT(m_cnt): push an external-storage reference counter
 * back onto the head of the mcntfree list.
 */
#define _MEXT_DEALLOC_CNT(m_cnt) do {					\
	union mext_refcnt *__mcnt = (m_cnt);				\
									\
	mtx_lock(&mcntfree.m_mtx);					\
	__mcnt->next_ref = mcntfree.m_head;	/* push onto free list */ \
	mcntfree.m_head = __mcnt;					\
	mbstat.m_refree++;			/* one more free counter */ \
	mtx_unlock(&mcntfree.m_mtx);				 	\
} while (0)
326
327#define MEXT_INIT_REF(m, how) do { \
328 struct mbuf *__mmm = (m); \
329 \
330 _MEXT_ALLOC_CNT(__mmm->m_ext.ref_cnt, (how)); \
331 if (__mmm->m_ext.ref_cnt != NULL) \
332 MEXT_ADD_REF(__mmm); \

--- 34 unchanged lines hidden (view full) ---

367 } \
368} while (0)
369
/*
 * MGET(m, how, type): allocate one mbuf of type (type) and store it in
 * (m); (m) is set to NULL on failure and the drop count is bumped.
 * The per-type counter is updated while the free-list mutex is still
 * held; the mbuf fields are initialized after the lock is dropped.
 * NOTE(review): (how)'s wait semantics are implemented inside _MGET,
 * which is not visible here — confirm against its definition.
 */
#define	MGET(m, how, type) do {						\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_lock(&mmbfree.m_mtx);					\
	_MGET(_mm, _mhow);	/* take an mbuf from mmbfree */		\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;	/* counted under the list lock */ \
		mtx_unlock(&mmbfree.m_mtx);				\
		_MGET_SETUP(_mm, _mtype); /* init outside the lock */	\
	} else {							\
		mtx_unlock(&mmbfree.m_mtx);				\
		atomic_add_long(&mbstat.m_drops, 1);			\
	}								\
	(m) = _mm;							\
} while (0)
387
388#define _MGETHDR_SETUP(m_set, m_set_type) do { \
389 (m_set)->m_type = (m_set_type); \
390 (m_set)->m_next = NULL; \
391 (m_set)->m_nextpkt = NULL; \

--- 4 unchanged lines hidden (view full) ---

396 (m_set)->m_pkthdr.aux = NULL; \
397} while (0)
398
/*
 * MGETHDR(m, how, type): like MGET(), but the allocated mbuf is
 * initialized as a packet header via _MGETHDR_SETUP().  (m) is set to
 * NULL on failure and the drop count is bumped.
 */
#define	MGETHDR(m, how, type) do {					\
	struct mbuf *_mm;						\
	int _mhow = (how);						\
	int _mtype = (type);						\
									\
	mtx_lock(&mmbfree.m_mtx);					\
	_MGET(_mm, _mhow);	/* take an mbuf from mmbfree */		\
	if (_mm != NULL) {						\
		mbtypes[_mtype]++;	/* counted under the list lock */ \
		mtx_unlock(&mmbfree.m_mtx);				\
		_MGETHDR_SETUP(_mm, _mtype); /* init outside the lock */ \
	} else {							\
		mtx_unlock(&mmbfree.m_mtx);				\
		atomic_add_long(&mbstat.m_drops, 1);			\
	}								\
	(m) = _mm;							\
} while (0)
416
417/*
418 * mbuf external storage macros:
419 *
420 * MCLGET allocates and refers an mcluster to an mbuf

--- 16 unchanged lines hidden (view full) ---

437 _mp = m_clalloc_wait(); \
438 } \
439 (p) = _mp; \
440} while (0)
441
/*
 * MCLGET(m, how): allocate an mbuf cluster and attach it to mbuf (m) as
 * external storage.  On success M_EXT is set and m_data points at the
 * cluster.  If the reference counter cannot be allocated the cluster is
 * released again; on any failure the mbuf is left without M_EXT and the
 * drop count is bumped.
 */
#define	MCLGET(m, how) do {						\
	struct mbuf *_mm = (m);						\
									\
	mtx_lock(&mclfree.m_mtx);					\
	_MCLALLOC(_mm->m_ext.ext_buf, (how));				\
	mtx_unlock(&mclfree.m_mtx);					\
	if (_mm->m_ext.ext_buf != NULL) {				\
		MEXT_INIT_REF(_mm, (how));				\
		if (_mm->m_ext.ref_cnt == NULL) {			\
			/* no ref. counter: undo the cluster alloc */	\
			_MCLFREE(_mm->m_ext.ext_buf);			\
			_mm->m_ext.ext_buf = NULL;			\
		} else {						\
			_mm->m_data = _mm->m_ext.ext_buf;		\
			_mm->m_flags |= M_EXT;				\
			_mm->m_ext.ext_free = NULL;	/* default free */ \
			_mm->m_ext.ext_args = NULL;			\
			_mm->m_ext.ext_size = MCLBYTES;			\
			_mm->m_ext.ext_type = EXT_CLUSTER;		\
		}							\
	} else								\
		atomic_add_long(&mbstat.m_drops, 1);			\
} while (0)
464
465#define MEXTADD(m, buf, size, free, args, flags, type) do { \
466 struct mbuf *_mm = (m); \
467 \
468 MEXT_INIT_REF(_mm, M_TRYWAIT); \
469 if (_mm->m_ext.ref_cnt != NULL) { \
470 _mm->m_flags |= (M_EXT | (flags)); \

--- 4 unchanged lines hidden (view full) ---

475 _mm->m_ext.ext_args = (args); \
476 _mm->m_ext.ext_type = (type); \
477 } \
478} while (0)
479
/*
 * _MCLFREE(p): return the mbuf cluster (p) to the mclfree list and wake
 * up anyone sleeping on cluster allocation (wakeup is issued while the
 * list mutex is still held).
 */
#define	_MCLFREE(p) do {						\
	union mcluster *_mp = (union mcluster *)(p);			\
									\
	mtx_lock(&mclfree.m_mtx);					\
	_mp->mcl_next = mclfree.m_head;		/* push onto free list */ \
	mclfree.m_head = _mp;						\
	mbstat.m_clfree++;			/* one more free cluster */ \
	MBWAKEUP(m_clalloc_wid);	/* wake a waiting allocator */	\
	mtx_unlock(&mclfree.m_mtx);					\
} while (0)
490
491/* MEXTFREE:
492 * If the atomic_cmpset_int() returns 0, then we effectively do nothing
493 * in terms of "cleaning up" (freeing the ext buf and ref. counter) as
494 * this means that either there are still references, or another thread
495 * is taking care of the clean-up.
496 */

--- 18 unchanged lines hidden (view full) ---

515 * Place the successor, if any, in n.
516 */
#define	MFREE(m, n) do {						\
	struct mbuf *_mm = (m);						\
									\
	KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf"));	\
	if (_mm->m_flags & M_EXT)					\
		MEXTFREE(_mm);	/* drop ext. storage ref first */	\
	mtx_lock(&mmbfree.m_mtx);					\
	mbtypes[_mm->m_type]--;		/* per-type accounting */	\
	_mm->m_type = MT_FREE;						\
	mbtypes[MT_FREE]++;						\
	(n) = _mm->m_next;	/* hand the successor to the caller */	\
	_mm->m_next = mmbfree.m_head;	/* push onto free list */	\
	mmbfree.m_head = _mm;						\
	MBWAKEUP(m_mballoc_wid);	/* wake a waiting allocator */	\
	mtx_unlock(&mmbfree.m_mtx);					\
} while (0)
533
534/*
535 * M_WRITABLE(m)
536 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this
537 * can be both the local data payload, or an external buffer area,
538 * depending on whether M_EXT is set).
539 */

--- 74 unchanged lines hidden (view full) ---

614
/*
 * MCHTYPE(m, t): change mbuf (m) to type (t), keeping the per-type
 * allocation counters (mbtypes[]) consistent via atomic updates.
 */
#define MCHTYPE(m, t) do {						\
	struct mbuf *_mch = (m);					\
	int _mnewtype = (t);						\
	int _moldtype = _mch->m_type;					\
									\
	_mch->m_type = _mnewtype;					\
	atomic_subtract_long(&mbtypes[_moldtype], 1);			\
	atomic_add_long(&mbtypes[_mnewtype], 1);			\
} while (0)
626
627/* length to m_copy to copy all */
628#define M_COPYALL 1000000000
629
630/* compatibility with 4.3 */
631#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT)

--- 15 unchanged lines hidden (view full) ---

extern int max_datalen;	/* MHLEN - max_hdr */
extern struct mbstat mbstat;	/* mbuf usage statistics (drops, free counts) */
extern u_long mbtypes[MT_NTYPES]; /* per-type mbuf allocations */
extern int mbuf_wait;		/* mbuf sleep time */
extern struct mbuf *mbutl;	/* virtual address of mclusters */
extern struct mclfree_lst mclfree;	/* free mbuf cluster list */
extern struct mbffree_lst mmbfree;	/* free mbuf list */
extern struct mcntfree_lst mcntfree;	/* free ext. ref. counter list */
extern int nmbclusters;	/* presumably max cluster count — confirm at def. */
extern int nmbufs;	/* presumably max mbuf count — confirm at def. */
extern int nsfbufs;

void m_adj(struct mbuf *, int);
int m_alloc_ref(u_int, int);	/* refills mcntfree (see _MEXT_ALLOC_CNT) */
struct mbuf *m_aux_add(struct mbuf *, int, int);
void m_aux_delete(struct mbuf *, struct mbuf *);

--- 27 unchanged lines hidden ---