/* mbuf.h revision 68622 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
32 * 33 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95 34 * $FreeBSD: head/sys/sys/mbuf.h 68622 2000-11-11 23:12:27Z bmilekic $ 35 */ 36 37#ifndef _SYS_MBUF_H_ 38#define _SYS_MBUF_H_ 39 40#include <sys/mutex.h> /* XXX */ 41 42/* 43 * Mbufs are of a single size, MSIZE (machine/param.h), which 44 * includes overhead. An mbuf may add a single "mbuf cluster" of size 45 * MCLBYTES (also in machine/param.h), which has no additional overhead 46 * and is used instead of the internal data area; this is done when 47 * at least MINCLSIZE of data must be stored. 48 */ 49 50#define MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */ 51#define MHLEN (MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */ 52 53#define MINCLSIZE (MHLEN + 1) /* smallest amount to put in cluster */ 54#define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */ 55 56/* 57 * Maximum number of allocatable counters for external buffers. This 58 * ensures enough VM address space for the allocation of counters 59 * in the extreme case where all possible external buffers are allocated. 60 * 61 * Note: When new types of external storage are allocated, EXT_COUNTERS 62 * must be tuned accordingly. Practically, this isn't a big deal 63 * as each counter is only a word long, so we can fit 64 * (PAGE_SIZE / length of word) counters in a single page. 65 * 66 * XXX: Must increase this if using any of if_ti, if_wb, if_sk drivers, 67 * or any other drivers which may manage their own buffers and 68 * eventually attach them to mbufs. 
69 */ 70#define EXT_COUNTERS (nmbclusters + nsfbufs) 71 72/* 73 * Macros for type conversion 74 * mtod(m, t) - convert mbuf pointer to data pointer of correct type 75 * dtom(x) - convert data pointer within mbuf to mbuf pointer (XXX) 76 */ 77#define mtod(m, t) ((t)((m)->m_data)) 78#define dtom(x) ((struct mbuf *)((intptr_t)(x) & ~(MSIZE-1))) 79 80/* header at beginning of each mbuf: */ 81struct m_hdr { 82 struct mbuf *mh_next; /* next buffer in chain */ 83 struct mbuf *mh_nextpkt; /* next chain in queue/record */ 84 caddr_t mh_data; /* location of data */ 85 int mh_len; /* amount of data in this mbuf */ 86 short mh_type; /* type of data in this mbuf */ 87 short mh_flags; /* flags; see below */ 88}; 89 90/* record/packet header in first mbuf of chain; valid if M_PKTHDR set */ 91struct pkthdr { 92 struct ifnet *rcvif; /* rcv interface */ 93 int len; /* total packet length */ 94 /* variables for ip and tcp reassembly */ 95 void *header; /* pointer to packet header */ 96 /* variables for hardware checksum */ 97 int csum_flags; /* flags regarding checksum */ 98 int csum_data; /* data field used by csum routines */ 99 struct mbuf *aux; /* extra data buffer; ipsec/others */ 100}; 101 102/* description of external storage mapped into mbuf, valid if M_EXT set */ 103struct m_ext { 104 caddr_t ext_buf; /* start of buffer */ 105 void (*ext_free) /* free routine if not the usual */ 106 __P((caddr_t, void *)); 107 void *ext_args; /* optional argument pointer */ 108 u_int ext_size; /* size of buffer, for ext_free */ 109 union mext_refcnt *ref_cnt; /* pointer to ref count info */ 110 short ext_type; /* type of external storage */ 111}; 112 113struct mbuf { 114 struct m_hdr m_hdr; 115 union { 116 struct { 117 struct pkthdr MH_pkthdr; /* M_PKTHDR set */ 118 union { 119 struct m_ext MH_ext; /* M_EXT set */ 120 char MH_databuf[MHLEN]; 121 } MH_dat; 122 } MH; 123 char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */ 124 } M_dat; 125}; 126#define m_next m_hdr.mh_next 127#define m_len 
m_hdr.mh_len 128#define m_data m_hdr.mh_data 129#define m_type m_hdr.mh_type 130#define m_flags m_hdr.mh_flags 131#define m_nextpkt m_hdr.mh_nextpkt 132#define m_act m_nextpkt 133#define m_pkthdr M_dat.MH.MH_pkthdr 134#define m_ext M_dat.MH.MH_dat.MH_ext 135#define m_pktdat M_dat.MH.MH_dat.MH_databuf 136#define m_dat M_dat.M_databuf 137 138/* mbuf flags */ 139#define M_EXT 0x0001 /* has associated external storage */ 140#define M_PKTHDR 0x0002 /* start of record */ 141#define M_EOR 0x0004 /* end of record */ 142#define M_RDONLY 0x0008 /* associated data is marked read-only */ 143#define M_PROTO1 0x0010 /* protocol-specific */ 144#define M_PROTO2 0x0020 /* protocol-specific */ 145#define M_PROTO3 0x0040 /* protocol-specific */ 146#define M_PROTO4 0x0080 /* protocol-specific */ 147#define M_PROTO5 0x0100 /* protocol-specific */ 148 149/* mbuf pkthdr flags, also in m_flags */ 150#define M_BCAST 0x0200 /* send/received as link-level broadcast */ 151#define M_MCAST 0x0400 /* send/received as link-level multicast */ 152#define M_FRAG 0x0800 /* packet is a fragment of a larger packet */ 153#define M_FIRSTFRAG 0x1000 /* packet is first fragment */ 154#define M_LASTFRAG 0x2000 /* packet is last fragment */ 155 156/* external buffer types: identify ext_buf type */ 157#define EXT_CLUSTER 1 /* mbuf cluster */ 158#define EXT_SFBUF 2 /* sendfile(2)'s sf_bufs */ 159#define EXT_NET_DRV 100 /* custom ext_buf provided by net driver(s) */ 160 161/* flags copied when copying m_pkthdr */ 162#define M_COPYFLAGS (M_PKTHDR|M_EOR|M_PROTO1|M_PROTO1|M_PROTO2|M_PROTO3 | \ 163 M_PROTO4|M_PROTO5|M_BCAST|M_MCAST|M_FRAG|M_RDONLY) 164 165/* flags indicating hw checksum support and sw checksum requirements */ 166#define CSUM_IP 0x0001 /* will csum IP */ 167#define CSUM_TCP 0x0002 /* will csum TCP */ 168#define CSUM_UDP 0x0004 /* will csum UDP */ 169#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */ 170#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */ 171 172#define CSUM_IP_CHECKED 
0x0100 /* did csum IP */ 173#define CSUM_IP_VALID 0x0200 /* ... the csum is valid */ 174#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */ 175#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */ 176 177#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP) 178#define CSUM_DELAY_IP (CSUM_IP) /* XXX add ipv6 here too? */ 179 180/* mbuf types */ 181#define MT_FREE 0 /* should be on free list */ 182#define MT_DATA 1 /* dynamic (data) allocation */ 183#define MT_HEADER 2 /* packet header */ 184#if 0 185#define MT_SOCKET 3 /* socket structure */ 186#define MT_PCB 4 /* protocol control block */ 187#define MT_RTABLE 5 /* routing tables */ 188#define MT_HTABLE 6 /* IMP host tables */ 189#define MT_ATABLE 7 /* address resolution tables */ 190#endif 191#define MT_SONAME 8 /* socket name */ 192#if 0 193#define MT_SOOPTS 10 /* socket options */ 194#endif 195#define MT_FTABLE 11 /* fragment reassembly header */ 196#if 0 197#define MT_RIGHTS 12 /* access rights */ 198#define MT_IFADDR 13 /* interface address */ 199#endif 200#define MT_CONTROL 14 /* extra-data protocol message */ 201#define MT_OOBDATA 15 /* expedited data */ 202 203#define MT_NTYPES 16 /* number of mbuf types for mbtypes[] */ 204 205/* 206 * mbuf statistics 207 */ 208struct mbstat { 209 u_long m_mbufs; /* # mbufs obtained from page pool */ 210 u_long m_clusters; /* # clusters obtained from page pool */ 211 u_long m_clfree; /* # clusters on freelist (cache) */ 212 u_long m_refcnt; /* # ref counters obtained from page pool */ 213 u_long m_refree; /* # ref counters on freelist (cache) */ 214 u_long m_spare; /* spare field */ 215 u_long m_drops; /* times failed to find space */ 216 u_long m_wait; /* times waited for space */ 217 u_long m_drain; /* times drained protocols for space */ 218 u_long m_mcfail; /* times m_copym failed */ 219 u_long m_mpfail; /* times m_pullup failed */ 220 u_long m_msize; /* length of an mbuf */ 221 u_long m_mclbytes; /* length of an mbuf cluster */ 222 u_long m_minclsize; /* min 
length of data to allocate a cluster */ 223 u_long m_mlen; /* length of data in an mbuf */ 224 u_long m_mhlen; /* length of data in a header mbuf */ 225}; 226 227/* flags to m_get/MGET */ 228#define M_DONTWAIT 1 229#define M_WAIT 0 230 231/* 232 * Normal mbuf clusters are normally treated as character arrays 233 * after allocation, but use the first word of the buffer as a free list 234 * pointer while on the free list. 235 */ 236union mcluster { 237 union mcluster *mcl_next; 238 char mcl_buf[MCLBYTES]; 239}; 240 241/* 242 * The m_ext object reference counter structure. 243 */ 244union mext_refcnt { 245 union mext_refcnt *next_ref; 246 u_long refcnt; 247}; 248 249/* 250 * free list header definitions: mbffree_lst, mclfree_lst, mcntfree_lst 251 */ 252struct mbffree_lst { 253 struct mbuf *m_head; 254 struct mtx m_mtx; 255}; 256 257struct mclfree_lst { 258 union mcluster *m_head; 259 struct mtx m_mtx; 260}; 261 262struct mcntfree_lst { 263 union mext_refcnt *m_head; 264 struct mtx m_mtx; 265}; 266 267/* 268 * Wake up the next instance (if any) of a sleeping allocation - which is 269 * waiting for a {cluster, mbuf} to be freed. 270 * 271 * Must be called with the appropriate mutex held. 
272 */ 273#define MBWAKEUP(m_wid) do { \ 274 if ((m_wid)) { \ 275 m_wid--; \ 276 wakeup_one(&(m_wid)); \ 277 } \ 278} while (0) 279 280/* 281 * mbuf external reference count management macros: 282 * 283 * MEXT_IS_REF(m): true if (m) is not the only mbuf referencing 284 * the external buffer ext_buf 285 * MEXT_REM_REF(m): remove reference to m_ext object 286 * MEXT_ADD_REF(m): add reference to m_ext object already 287 * referred to by (m) 288 * MEXT_INIT_REF(m): allocate and initialize an external 289 * object reference counter for (m) 290 */ 291#define MEXT_IS_REF(m) ((m)->m_ext.ref_cnt->refcnt > 1) 292 293#define MEXT_REM_REF(m) do { \ 294 KASSERT((m)->m_ext.ref_cnt->refcnt > 0, ("m_ext refcnt < 0")); \ 295 atomic_subtract_long(&((m)->m_ext.ref_cnt->refcnt), 1); \ 296} while(0) 297 298#define MEXT_ADD_REF(m) atomic_add_long(&((m)->m_ext.ref_cnt->refcnt), 1) 299 300#define _MEXT_ALLOC_CNT(m_cnt, how) do { \ 301 union mext_refcnt *__mcnt; \ 302 \ 303 mtx_enter(&mcntfree.m_mtx, MTX_DEF); \ 304 if (mcntfree.m_head == NULL) \ 305 m_alloc_ref(1, (how)); \ 306 __mcnt = mcntfree.m_head; \ 307 if (__mcnt != NULL) { \ 308 mcntfree.m_head = __mcnt->next_ref; \ 309 mbstat.m_refree--; \ 310 __mcnt->refcnt = 0; \ 311 } \ 312 mtx_exit(&mcntfree.m_mtx, MTX_DEF); \ 313 (m_cnt) = __mcnt; \ 314} while (0) 315 316#define _MEXT_DEALLOC_CNT(m_cnt) do { \ 317 union mext_refcnt *__mcnt = (m_cnt); \ 318 \ 319 mtx_enter(&mcntfree.m_mtx, MTX_DEF); \ 320 __mcnt->next_ref = mcntfree.m_head; \ 321 mcntfree.m_head = __mcnt; \ 322 mbstat.m_refree++; \ 323 mtx_exit(&mcntfree.m_mtx, MTX_DEF); \ 324} while (0) 325 326#define MEXT_INIT_REF(m, how) do { \ 327 struct mbuf *__mmm = (m); \ 328 \ 329 _MEXT_ALLOC_CNT(__mmm->m_ext.ref_cnt, (how)); \ 330 if (__mmm->m_ext.ref_cnt != NULL) \ 331 MEXT_ADD_REF(__mmm); \ 332} while (0) 333 334/* 335 * mbuf allocation/deallocation macros: 336 * 337 * MGET(struct mbuf *m, int how, int type) 338 * allocates an mbuf and initializes it to contain internal data. 
339 * 340 * MGETHDR(struct mbuf *m, int how, int type) 341 * allocates an mbuf and initializes it to contain a packet header 342 * and internal data. 343 */ 344/* 345 * Lower-level macros for MGET(HDR)... Not to be used outside the 346 * subsystem ("non-exportable" macro names are prepended with "_"). 347 */ 348#define _MGET_SETUP(m_set, m_set_type) do { \ 349 (m_set)->m_type = (m_set_type); \ 350 (m_set)->m_next = NULL; \ 351 (m_set)->m_nextpkt = NULL; \ 352 (m_set)->m_data = (m_set)->m_dat; \ 353 (m_set)->m_flags = 0; \ 354} while (0) 355 356#define _MGET(m_mget, m_get_how) do { \ 357 if (mmbfree.m_head == NULL) \ 358 m_mballoc(1, (m_get_how)); \ 359 (m_mget) = mmbfree.m_head; \ 360 if ((m_mget) != NULL) { \ 361 mmbfree.m_head = (m_mget)->m_next; \ 362 mbtypes[MT_FREE]--; \ 363 } else { \ 364 if ((m_get_how) == M_WAIT) \ 365 (m_mget) = m_mballoc_wait(); \ 366 } \ 367} while (0) 368 369#define MGET(m, how, type) do { \ 370 struct mbuf *_mm; \ 371 int _mhow = (how); \ 372 int _mtype = (type); \ 373 \ 374 mtx_enter(&mmbfree.m_mtx, MTX_DEF); \ 375 _MGET(_mm, _mhow); \ 376 if (_mm != NULL) { \ 377 mbtypes[_mtype]++; \ 378 mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ 379 _MGET_SETUP(_mm, _mtype); \ 380 } else \ 381 mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ 382 (m) = _mm; \ 383} while (0) 384 385#define _MGETHDR_SETUP(m_set, m_set_type) do { \ 386 (m_set)->m_type = (m_set_type); \ 387 (m_set)->m_next = NULL; \ 388 (m_set)->m_nextpkt = NULL; \ 389 (m_set)->m_data = (m_set)->m_pktdat; \ 390 (m_set)->m_flags = M_PKTHDR; \ 391 (m_set)->m_pkthdr.rcvif = NULL; \ 392 (m_set)->m_pkthdr.csum_flags = 0; \ 393 (m_set)->m_pkthdr.aux = NULL; \ 394} while (0) 395 396#define MGETHDR(m, how, type) do { \ 397 struct mbuf *_mm; \ 398 int _mhow = (how); \ 399 int _mtype = (type); \ 400 \ 401 mtx_enter(&mmbfree.m_mtx, MTX_DEF); \ 402 _MGET(_mm, _mhow); \ 403 if (_mm != NULL) { \ 404 mbtypes[_mtype]++; \ 405 mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ 406 _MGETHDR_SETUP(_mm, _mtype); \ 407 } else \ 408 
mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ 409 (m) = _mm; \ 410} while (0) 411 412/* 413 * mbuf external storage macros: 414 * 415 * MCLGET allocates and refers an mcluster to an mbuf 416 * MEXTADD sets up pre-allocated external storage and refers to mbuf 417 * MEXTFREE removes reference to external object and frees it if 418 * necessary 419 */ 420#define _MCLALLOC(p, how) do { \ 421 caddr_t _mp; \ 422 int _mhow = (how); \ 423 \ 424 if (mclfree.m_head == NULL) \ 425 m_clalloc(1, _mhow); \ 426 _mp = (caddr_t)mclfree.m_head; \ 427 if (_mp != NULL) { \ 428 mbstat.m_clfree--; \ 429 mclfree.m_head = ((union mcluster *)_mp)->mcl_next; \ 430 } else { \ 431 if (_mhow == M_WAIT) \ 432 _mp = m_clalloc_wait(); \ 433 } \ 434 (p) = _mp; \ 435} while (0) 436 437#define MCLGET(m, how) do { \ 438 struct mbuf *_mm = (m); \ 439 \ 440 mtx_enter(&mclfree.m_mtx, MTX_DEF); \ 441 _MCLALLOC(_mm->m_ext.ext_buf, (how)); \ 442 mtx_exit(&mclfree.m_mtx, MTX_DEF); \ 443 if (_mm->m_ext.ext_buf != NULL) { \ 444 MEXT_INIT_REF(_mm, (how)); \ 445 if (_mm->m_ext.ref_cnt == NULL) { \ 446 _MCLFREE(_mm->m_ext.ext_buf); \ 447 _mm->m_ext.ext_buf = NULL; \ 448 } else { \ 449 _mm->m_data = _mm->m_ext.ext_buf; \ 450 _mm->m_flags |= M_EXT; \ 451 _mm->m_ext.ext_free = NULL; \ 452 _mm->m_ext.ext_args = NULL; \ 453 _mm->m_ext.ext_size = MCLBYTES; \ 454 _mm->m_ext.ext_type = EXT_CLUSTER; \ 455 } \ 456 } \ 457} while (0) 458 459#define MEXTADD(m, buf, size, free, args, flags, type) do { \ 460 struct mbuf *_mm = (m); \ 461 \ 462 MEXT_INIT_REF(_mm, M_WAIT); \ 463 if (_mm->m_ext.ref_cnt != NULL) { \ 464 _mm->m_flags |= (M_EXT | (flags)); \ 465 _mm->m_ext.ext_buf = (caddr_t)(buf); \ 466 _mm->m_data = _mm->m_ext.ext_buf; \ 467 _mm->m_ext.ext_size = (size); \ 468 _mm->m_ext.ext_free = (free); \ 469 _mm->m_ext.ext_args = (args); \ 470 _mm->m_ext.ext_type = (type); \ 471 } \ 472} while (0) 473 474#define _MCLFREE(p) do { \ 475 union mcluster *_mp = (union mcluster *)(p); \ 476 \ 477 mtx_enter(&mclfree.m_mtx, MTX_DEF); \ 478 
_mp->mcl_next = mclfree.m_head; \ 479 mclfree.m_head = _mp; \ 480 mbstat.m_clfree++; \ 481 MBWAKEUP(m_clalloc_wid); \ 482 mtx_exit(&mclfree.m_mtx, MTX_DEF); \ 483} while (0) 484 485#define MEXTFREE(m) do { \ 486 struct mbuf *_mmm = (m); \ 487 \ 488 if (MEXT_IS_REF(_mmm)) \ 489 MEXT_REM_REF(_mmm); \ 490 else if (_mmm->m_ext.ext_type != EXT_CLUSTER) { \ 491 (*(_mmm->m_ext.ext_free))(_mmm->m_ext.ext_buf, \ 492 _mmm->m_ext.ext_args); \ 493 _MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt); \ 494 } else { \ 495 _MCLFREE(_mmm->m_ext.ext_buf); \ 496 _MEXT_DEALLOC_CNT(_mmm->m_ext.ref_cnt); \ 497 } \ 498 _mmm->m_flags &= ~M_EXT; \ 499} while (0) 500 501/* 502 * MFREE(struct mbuf *m, struct mbuf *n) 503 * Free a single mbuf and associated external storage. 504 * Place the successor, if any, in n. 505 */ 506#define MFREE(m, n) do { \ 507 struct mbuf *_mm = (m); \ 508 \ 509 KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \ 510 if (_mm->m_flags & M_EXT) \ 511 MEXTFREE(_mm); \ 512 mtx_enter(&mmbfree.m_mtx, MTX_DEF); \ 513 mbtypes[_mm->m_type]--; \ 514 _mm->m_type = MT_FREE; \ 515 mbtypes[MT_FREE]++; \ 516 (n) = _mm->m_next; \ 517 _mm->m_next = mmbfree.m_head; \ 518 mmbfree.m_head = _mm; \ 519 MBWAKEUP(m_mballoc_wid); \ 520 mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ 521} while (0) 522 523/* 524 * M_WRITABLE(m) 525 * Evaluate TRUE if it's safe to write to the mbuf m's data region (this 526 * can be both the local data payload, or an external buffer area, 527 * depending on whether M_EXT is set). 528 */ 529#define M_WRITABLE(m) (!((m)->m_flags & M_RDONLY) && (!((m)->m_flags \ 530 & M_EXT) || !MEXT_IS_REF(m))) 531 532/* 533 * Copy mbuf pkthdr from "from" to "to". 534 * from must have M_PKTHDR set, and to must be empty. 535 * aux pointer will be moved to `to'. 
536 */ 537#define M_COPY_PKTHDR(to, from) do { \ 538 struct mbuf *_mfrom = (from); \ 539 struct mbuf *_mto = (to); \ 540 \ 541 _mto->m_data = _mto->m_pktdat; \ 542 _mto->m_flags = _mfrom->m_flags & M_COPYFLAGS; \ 543 _mto->m_pkthdr = _mfrom->m_pkthdr; \ 544 _mfrom->m_pkthdr.aux = (struct mbuf *)NULL; \ 545} while (0) 546 547/* 548 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place 549 * an object of the specified size at the end of the mbuf, longword aligned. 550 */ 551#define M_ALIGN(m, len) do { \ 552 (m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1); \ 553} while (0) 554 555/* 556 * As above, for mbufs allocated with m_gethdr/MGETHDR 557 * or initialized by M_COPY_PKTHDR. 558 */ 559#define MH_ALIGN(m, len) do { \ 560 (m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1); \ 561} while (0) 562 563/* 564 * Compute the amount of space available 565 * before the current start of data in an mbuf. 566 */ 567#define M_LEADINGSPACE(m) \ 568 ((m)->m_flags & M_EXT ? \ 569 /* (m)->m_data - (m)->m_ext.ext_buf */ 0 : \ 570 (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat : \ 571 (m)->m_data - (m)->m_dat) 572 573/* 574 * Compute the amount of space available 575 * after the end of data in an mbuf. 576 */ 577#define M_TRAILINGSPACE(m) \ 578 ((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf + \ 579 (m)->m_ext.ext_size - ((m)->m_data + (m)->m_len) : \ 580 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len)) 581 582/* 583 * Arrange to prepend space of size plen to mbuf m. 584 * If a new mbuf must be allocated, how specifies whether to wait. 585 * If how is M_DONTWAIT and allocation fails, the original mbuf chain 586 * is freed and m is set to NULL. 
587 */ 588#define M_PREPEND(m, plen, how) do { \ 589 struct mbuf **_mmp = &(m); \ 590 struct mbuf *_mm = *_mmp; \ 591 int _mplen = (plen); \ 592 int __mhow = (how); \ 593 \ 594 if (M_LEADINGSPACE(_mm) >= _mplen) { \ 595 _mm->m_data -= _mplen; \ 596 _mm->m_len += _mplen; \ 597 } else \ 598 _mm = m_prepend(_mm, _mplen, __mhow); \ 599 if (_mm != NULL && _mm->m_flags & M_PKTHDR) \ 600 _mm->m_pkthdr.len += _mplen; \ 601 *_mmp = _mm; \ 602} while (0) 603 604/* 605 * change mbuf to new type 606 */ 607#define MCHTYPE(m, t) do { \ 608 struct mbuf *_mm = (m); \ 609 int _mt = (t); \ 610 \ 611 atomic_subtract_long(&mbtypes[_mm->m_type], 1); \ 612 atomic_add_long(&mbtypes[_mt], 1); \ 613 _mm->m_type = (_mt); \ 614} while (0) 615 616/* length to m_copy to copy all */ 617#define M_COPYALL 1000000000 618 619/* compatibility with 4.3 */ 620#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT) 621 622/* 623 * pkthdr.aux type tags. 624 */ 625struct mauxtag { 626 int af; 627 int type; 628}; 629 630#ifdef _KERNEL 631extern u_long m_clalloc_wid; /* mbuf cluster wait count */ 632extern u_long m_mballoc_wid; /* mbuf wait count */ 633extern int max_linkhdr; /* largest link-level header */ 634extern int max_protohdr; /* largest protocol header */ 635extern int max_hdr; /* largest link+protocol header */ 636extern int max_datalen; /* MHLEN - max_hdr */ 637extern struct mbstat mbstat; 638extern u_long mbtypes[MT_NTYPES]; /* per-type mbuf allocations */ 639extern int mbuf_wait; /* mbuf sleep time */ 640extern struct mbuf *mbutl; /* virtual address of mclusters */ 641extern struct mclfree_lst mclfree; 642extern struct mbffree_lst mmbfree; 643extern struct mcntfree_lst mcntfree; 644extern int nmbclusters; 645extern int nmbufs; 646extern int nsfbufs; 647 648void m_adj __P((struct mbuf *, int)); 649int m_alloc_ref __P((u_int, int)); 650void m_cat __P((struct mbuf *,struct mbuf *)); 651int m_clalloc __P((int, int)); 652caddr_t m_clalloc_wait __P((void)); 653void m_copyback __P((struct mbuf *, 
int, int, caddr_t)); 654void m_copydata __P((struct mbuf *,int,int,caddr_t)); 655struct mbuf *m_copym __P((struct mbuf *, int, int, int)); 656struct mbuf *m_copypacket __P((struct mbuf *, int)); 657struct mbuf *m_devget __P((char *, int, int, struct ifnet *, 658 void (*copy)(char *, caddr_t, u_int))); 659struct mbuf *m_dup __P((struct mbuf *, int)); 660struct mbuf *m_free __P((struct mbuf *)); 661void m_freem __P((struct mbuf *)); 662struct mbuf *m_get __P((int, int)); 663struct mbuf *m_getclr __P((int, int)); 664struct mbuf *m_gethdr __P((int, int)); 665int m_mballoc __P((int, int)); 666struct mbuf *m_mballoc_wait __P((void)); 667struct mbuf *m_prepend __P((struct mbuf *,int,int)); 668struct mbuf *m_pulldown __P((struct mbuf *, int, int, int *)); 669void m_print __P((const struct mbuf *m)); 670struct mbuf *m_pullup __P((struct mbuf *, int)); 671struct mbuf *m_split __P((struct mbuf *,int,int)); 672struct mbuf *m_aux_add __P((struct mbuf *, int, int)); 673struct mbuf *m_aux_find __P((struct mbuf *, int, int)); 674void m_aux_delete __P((struct mbuf *, struct mbuf *)); 675#endif /* _KERNEL */ 676 677#endif /* !_SYS_MBUF_H_ */ 678