uipc_mbuf.c revision 63203
159191Skris/* 259191Skris * Copyright (c) 1982, 1986, 1988, 1991, 1993 359191Skris * The Regents of the University of California. All rights reserved. 476866Skris * 576866Skris * Redistribution and use in source and binary forms, with or without 659191Skris * modification, are permitted provided that the following conditions 759191Skris * are met: 859191Skris * 1. Redistributions of source code must retain the above copyright 976866Skris * notice, this list of conditions and the following disclaimer. 1076866Skris * 2. Redistributions in binary form must reproduce the above copyright 1189837Skris * notice, this list of conditions and the following disclaimer in the 12109998Smarkm * documentation and/or other materials provided with the distribution. 13237657Sjkim * 3. All advertising materials mentioning features or use of this software 1476866Skris * must display the following acknowledgement: 1576866Skris * This product includes software developed by the University of 1676866Skris * California, Berkeley and its contributors. 1759191Skris * 4. Neither the name of the University nor the names of its contributors 1876866Skris * may be used to endorse or promote products derived from this software 1976866Skris * without specific prior written permission. 2076866Skris * 2176866Skris * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 2259191Skris * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2379998Skris * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2459191Skris * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 2559191Skris * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2659191Skris * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2768651Skris * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2859191Skris * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2959191Skris * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 3068651Skris * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 3176866Skris * SUCH DAMAGE. 3276866Skris * 3389837Skris * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94 34109998Smarkm * $FreeBSD: head/sys/kern/uipc_mbuf.c 63203 2000-07-15 06:02:48Z alfred $ 35160814Ssimon */ 36194206Ssimon 37194206Ssimon#include "opt_param.h" 3876866Skris#include <sys/param.h> 3976866Skris#include <sys/systm.h> 4076866Skris#include <sys/malloc.h> 4176866Skris#include <sys/mbuf.h> 4268651Skris#include <sys/kernel.h> 4368651Skris#include <sys/sysctl.h> 44109998Smarkm#include <sys/domain.h> 4589837Skris#include <sys/protosw.h> 4668651Skris 4776866Skris#include <vm/vm.h> 48100936Snectar#include <vm/vm_kern.h> 49109998Smarkm#include <vm/vm_extern.h> 50100936Snectar 51109998Smarkm#ifdef INVARIANTS 52109998Smarkm#include <machine/cpu.h> 53160814Ssimon#endif 54160814Ssimon 55160814Ssimonstatic void mbinit __P((void *)); 56216166SsimonSYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL) 57216166Ssimon 58216166Ssimonstruct mbuf *mbutl; 5959191Skrischar *mclrefcnt; 6076866Skrisstruct mbstat mbstat; 6159191Skrisu_long mbtypes[MT_NTYPES]; 6276866Skrisstruct mbuf *mmbfree; 6376866Skrisunion mcluster *mclfree; 6476866Skrisint max_linkhdr; 65142425Snectarint max_protohdr; 6676866Skrisint max_hdr; 6776866Skrisint max_datalen; 6876866Skrisint nmbclusters; 6976866Skrisint nmbufs; 7076866Skrisu_int m_mballoc_wid = 0; 7176866Skrisu_int m_clalloc_wid = 0; 7279998Skris 
73109998SmarkmSYSCTL_DECL(_kern_ipc); 74142425SnectarSYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW, 75194206Ssimon &max_linkhdr, 0, ""); 76194206SsimonSYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW, 77215697Ssimon &max_protohdr, 0, ""); 7876866SkrisSYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, ""); 7976866SkrisSYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW, 8076866Skris &max_datalen, 0, ""); 8176866SkrisSYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW, 8276866Skris &mbuf_wait, 0, ""); 8359191SkrisSYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, ""); 8459191SkrisSYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes, 8559191Skris sizeof(mbtypes), "LU", ""); 86246772SjkimSYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD, 8759191Skris &nmbclusters, 0, "Maximum number of mbuf clusters available"); 8859191SkrisSYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0, 8959191Skris "Maximum number of mbufs available"); 90246772Sjkim#ifndef NMBCLUSTERS 9159191Skris#define NMBCLUSTERS (512 + MAXUSERS * 16) 9259191Skris#endif 9359191SkrisTUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters); 9459191SkrisTUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs); 9559191Skris 9659191Skrisstatic void m_reclaim __P((void)); 9759191Skris 9859191Skris/* "number of clusters of pages" */ 9959191Skris#define NCL_INIT 1 10059191Skris 10159191Skris#define NMB_INIT 16 102215697Ssimon 103215697Ssimon/* ARGSUSED*/ 10459191Skrisstatic void 10559191Skrismbinit(dummy) 10659191Skris void *dummy; 10759191Skris{ 10859191Skris int s; 109215697Ssimon 110215697Ssimon mmbfree = NULL; mclfree = NULL; 111215697Ssimon mbstat.m_msize = MSIZE; 112237657Sjkim mbstat.m_mclbytes = MCLBYTES; 113237657Sjkim mbstat.m_minclsize = MINCLSIZE; 114237657Sjkim mbstat.m_mlen = MLEN; 11559191Skris mbstat.m_mhlen = MHLEN; 11659191Skris 11759191Skris s = splimp(); 
11859191Skris if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0) 11959191Skris goto bad; 12059191Skris#if MCLBYTES <= PAGE_SIZE 12159191Skris if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0) 12259191Skris goto bad; 12359191Skris#else 12459191Skris /* It's OK to call contigmalloc in this context. */ 12559191Skris if (m_clalloc(16, M_WAIT) == 0) 12659191Skris goto bad; 12759191Skris#endif 12859191Skris splx(s); 12959191Skris return; 13059191Skrisbad: 13159191Skris panic("mbinit"); 13276866Skris} 13376866Skris 134127128Snectar/* 135216166Ssimon * Allocate at least nmb mbufs and place on mbuf free list. 136127128Snectar * Must be called at splimp. 13776866Skris */ 13876866Skris/* ARGSUSED */ 13976866Skrisint 14076866Skrism_mballoc(nmb, how) 141127128Snectar register int nmb; 14276866Skris int how; 14376866Skris{ 14476866Skris register caddr_t p; 14576866Skris register int i; 14676866Skris int nbytes; 14776866Skris 14876866Skris /* 14976866Skris * If we've hit the mbuf limit, stop allocating from mb_map, 15076866Skris * (or trying to) in order to avoid dipping into the section of 15176866Skris * mb_map which we've "reserved" for clusters. 15276866Skris */ 15389837Skris if ((nmb + mbstat.m_mbufs) > nmbufs) 15476866Skris return (0); 15589837Skris 15689837Skris /* 157160814Ssimon * Once we run out of map space, it will be impossible to get 158160814Ssimon * any more (nothing is ever freed back to the map) 15989837Skris * -- however you are not dead as m_reclaim might 160109998Smarkm * still be able to free a substantial amount of space. 161109998Smarkm * 162109998Smarkm * XXX Furthermore, we can also work with "recycled" mbufs (when 163109998Smarkm * we're calling with M_WAIT the sleep procedure will be woken 164109998Smarkm * up when an mbuf is freed. See m_mballoc_wait()). 
165109998Smarkm */ 166109998Smarkm if (mb_map_full) 167109998Smarkm return (0); 168160814Ssimon 169160814Ssimon nbytes = round_page(nmb * MSIZE); 170109998Smarkm p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT); 171109998Smarkm if (p == 0 && how == M_WAIT) { 172109998Smarkm mbstat.m_wait++; 173109998Smarkm p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK); 174237657Sjkim } 175237657Sjkim 176237657Sjkim /* 177237657Sjkim * Either the map is now full, or `how' is M_NOWAIT and there 178237657Sjkim * are no pages left. 179237657Sjkim */ 180237657Sjkim if (p == NULL) 181237657Sjkim return (0); 182237657Sjkim 183237657Sjkim nmb = nbytes / MSIZE; 184237657Sjkim for (i = 0; i < nmb; i++) { 185237657Sjkim ((struct mbuf *)p)->m_next = mmbfree; 186237657Sjkim mmbfree = (struct mbuf *)p; 18776866Skris p += MSIZE; 18876866Skris } 18959191Skris mbstat.m_mbufs += nmb; 19059191Skris mbtypes[MT_FREE] += nmb; 19159191Skris return (1); 19259191Skris} 19359191Skris 19459191Skris/* 19559191Skris * Once the mb_map has been exhausted and if the call to the allocation macros 196160814Ssimon * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely 197160814Ssimon * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a 19859191Skris * designated (mbuf_wait) time. 19959191Skris */ 20076866Skrisstruct mbuf * 20159191Skrism_mballoc_wait(int caller, int type) 20276866Skris{ 20376866Skris struct mbuf *p; 20476866Skris int s; 20559191Skris 20676866Skris m_mballoc_wid++; 20776866Skris if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK) 20876866Skris m_mballoc_wid--; 20959191Skris 21076866Skris /* 21189837Skris * Now that we (think) that we've got something, we will redo an 21276866Skris * MGET, but avoid getting into another instance of m_mballoc_wait() 21376866Skris * XXX: We retry to fetch _even_ if the sleep timed out. 
This is left 21476866Skris * this way, purposely, in the [unlikely] case that an mbuf was 21576866Skris * freed but the sleep was not awakened in time. 21659191Skris */ 21776866Skris p = NULL; 21876866Skris switch (caller) { 21976866Skris case MGET_C: 22059191Skris MGET(p, M_DONTWAIT, type); 22159191Skris break; 22259191Skris case MGETHDR_C: 22359191Skris MGETHDR(p, M_DONTWAIT, type); 224111147Snectar break; 225111147Snectar default: 226111147Snectar panic("m_mballoc_wait: invalid caller (%d)", caller); 227111147Snectar } 22859191Skris 229111147Snectar s = splimp(); 230111147Snectar if (p != NULL) { /* We waited and got something... */ 231111147Snectar mbstat.m_wait++; 232111147Snectar /* Wake up another if we have more free. */ 233111147Snectar if (mmbfree != NULL) 234111147Snectar MMBWAKEUP(); 235111147Snectar } 236111147Snectar splx(s); 237111147Snectar return (p); 238111147Snectar} 239111147Snectar 24059191Skris#if MCLBYTES > PAGE_SIZE 241111147Snectarstatic int i_want_my_mcl; 242111147Snectar 243111147Snectarstatic void 244111147Snectarkproc_mclalloc(void) 245111147Snectar{ 246111147Snectar int status; 247111147Snectar 248111147Snectar while (1) { 24979998Skris tsleep(&i_want_my_mcl, PVM, "mclalloc", 0); 25079998Skris 25179998Skris for (; i_want_my_mcl; i_want_my_mcl--) { 25279998Skris if (m_clalloc(1, M_WAIT) == 0) 25379998Skris printf("m_clalloc failed even in process context!\n"); 25459191Skris } 25579998Skris } 25679998Skris} 25779998Skris 25879998Skrisstatic struct proc *mclallocproc; 25979998Skrisstatic struct kproc_desc mclalloc_kp = { 26079998Skris "mclalloc", 26179998Skris kproc_mclalloc, 26279998Skris &mclallocproc 26389837Skris}; 26479998SkrisSYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start, 26559191Skris &mclalloc_kp); 26679998Skris#endif 26779998Skris 26859191Skris/* 26979998Skris * Allocate some number of mbuf clusters 27079998Skris * and place on cluster free list. 27179998Skris * Must be called at splimp. 
27279998Skris */ 27379998Skris/* ARGSUSED */ 27479998Skrisint 27579998Skrism_clalloc(ncl, how) 27679998Skris register int ncl; 27779998Skris int how; 27879998Skris{ 27979998Skris register caddr_t p; 28059191Skris register int i; 28179998Skris int npg; 28279998Skris 28379998Skris /* 28479998Skris * If we've hit the mcluster number limit, stop allocating from 28579998Skris * mb_map, (or trying to) in order to avoid dipping into the section 28679998Skris * of mb_map which we've "reserved" for mbufs. 28779998Skris */ 28879998Skris if ((ncl + mbstat.m_clusters) > nmbclusters) { 28979998Skris mbstat.m_drops++; 29079998Skris return (0); 29179998Skris } 29279998Skris 29376866Skris /* 29476866Skris * Once we run out of map space, it will be impossible 29576866Skris * to get any more (nothing is ever freed back to the 29676866Skris * map). From this point on, we solely rely on freed 29776866Skris * mclusters. 298237657Sjkim */ 29976866Skris if (mb_map_full) { 30076866Skris mbstat.m_drops++; 30176866Skris return (0); 30276866Skris } 30376866Skris 30476866Skris#if MCLBYTES > PAGE_SIZE 30576866Skris if (how != M_WAIT) { 30676866Skris i_want_my_mcl += ncl; 30776866Skris wakeup(&i_want_my_mcl); 30876866Skris mbstat.m_wait++; 30976866Skris p = 0; 31076866Skris } else { 31176866Skris p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul, 31276866Skris ~0ul, PAGE_SIZE, 0, mb_map); 31376866Skris } 31476866Skris#else 31576866Skris npg = ncl; 31676866Skris p = (caddr_t)kmem_malloc(mb_map, ctob(npg), 31776866Skris how != M_WAIT ? M_NOWAIT : M_WAITOK); 31876866Skris ncl = ncl * PAGE_SIZE / MCLBYTES; 31976866Skris#endif 32076866Skris /* 32176866Skris * Either the map is now full, or `how' is M_NOWAIT and there 32276866Skris * are no pages left. 
32376866Skris */ 32476866Skris if (p == NULL) { 32576866Skris mbstat.m_drops++; 32676866Skris return (0); 32776866Skris } 32876866Skris 32976866Skris for (i = 0; i < ncl; i++) { 33076866Skris ((union mcluster *)p)->mcl_next = mclfree; 33176866Skris mclfree = (union mcluster *)p; 33276866Skris p += MCLBYTES; 33376866Skris mbstat.m_clfree++; 33476866Skris } 33576866Skris mbstat.m_clusters += ncl; 33676866Skris return (1); 33776866Skris} 33876866Skris 33976866Skris/* 34076866Skris * Once the mb_map submap has been exhausted and the allocation is called with 34176866Skris * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will 34276866Skris * sleep for a designated amount of time (mbuf_wait) or until we're woken up 34376866Skris * due to sudden mcluster availability. 34476866Skris */ 34576866Skriscaddr_t 34676866Skrism_clalloc_wait(void) 34776866Skris{ 34876866Skris caddr_t p; 34976866Skris int s; 35076866Skris 35176866Skris#ifdef __i386__ 35276866Skris /* If in interrupt context, and INVARIANTS, maintain sanity and die. */ 35376866Skris KASSERT(intr_nesting_level == 0, ("CLALLOC: CANNOT WAIT IN INTERRUPT")); 35476866Skris#endif 35576866Skris 35676866Skris /* Sleep until something's available or until we expire. */ 35776866Skris m_clalloc_wid++; 35876866Skris if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK) 35976866Skris m_clalloc_wid--; 36076866Skris 36176866Skris /* 36276866Skris * Now that we (think) that we've got something, we will redo and 36376866Skris * MGET, but avoid getting into another instance of m_clalloc_wait() 36476866Skris */ 36576866Skris p = NULL; 36676866Skris MCLALLOC(p, M_DONTWAIT); 36789837Skris 36876866Skris s = splimp(); 36976866Skris if (p != NULL) { /* We waited and got something... */ 37076866Skris mbstat.m_wait++; 37176866Skris /* Wake up another if we have more free. 
*/ 37276866Skris if (mclfree != NULL) 37376866Skris MCLWAKEUP(); 37476866Skris } 37576866Skris 37676866Skris splx(s); 37776866Skris return (p); 37876866Skris} 37976866Skris 38076866Skris/* 38176866Skris * When MGET fails, ask protocols to free space when short of memory, 38276866Skris * then re-attempt to allocate an mbuf. 38376866Skris */ 38476866Skrisstruct mbuf * 38576866Skrism_retry(i, t) 38689837Skris int i, t; 38789837Skris{ 38889837Skris register struct mbuf *m; 38989837Skris 39089837Skris /* 39189837Skris * Must only do the reclaim if not in an interrupt context. 39289837Skris */ 39389837Skris if (i == M_WAIT) { 39489837Skris#ifdef __i386__ 39589837Skris KASSERT(intr_nesting_level == 0, 39689837Skris ("MBALLOC: CANNOT WAIT IN INTERRUPT")); 39789837Skris#endif 39889837Skris m_reclaim(); 39989837Skris } 40089837Skris 40189837Skris /* 40289837Skris * Both m_mballoc_wait and m_retry must be nulled because 40389837Skris * when the MGET macro is run from here, we deffinately do _not_ 40489837Skris * want to enter an instance of m_mballoc_wait() or m_retry() (again!) 405109998Smarkm */ 40689837Skris#define m_mballoc_wait(caller,type) (struct mbuf *)0 407109998Smarkm#define m_retry(i, t) (struct mbuf *)0 408109998Smarkm MGET(m, i, t); 409109998Smarkm#undef m_retry 410109998Smarkm#undef m_mballoc_wait 411109998Smarkm 412109998Smarkm if (m != NULL) 413160814Ssimon mbstat.m_wait++; 414160814Ssimon else 415160814Ssimon mbstat.m_drops++; 416160814Ssimon 417160814Ssimon return (m); 418160814Ssimon} 419160814Ssimon 420160814Ssimon/* 421160814Ssimon * As above; retry an MGETHDR. 422160814Ssimon */ 423160814Ssimonstruct mbuf * 424160814Ssimonm_retryhdr(i, t) 425160814Ssimon int i, t; 426194206Ssimon{ 427160814Ssimon register struct mbuf *m; 428160814Ssimon 429194206Ssimon /* 430160814Ssimon * Must only do the reclaim if not in an interrupt context. 
431160814Ssimon */ 432160814Ssimon if (i == M_WAIT) { 433160814Ssimon#ifdef __i386__ 434160814Ssimon KASSERT(intr_nesting_level == 0, 435160814Ssimon ("MBALLOC: CANNOT WAIT IN INTERRUPT")); 436160814Ssimon#endif 437160814Ssimon m_reclaim(); 438160814Ssimon } 439160814Ssimon 440160814Ssimon#define m_mballoc_wait(caller,type) (struct mbuf *)0 441160814Ssimon#define m_retryhdr(i, t) (struct mbuf *)0 442160814Ssimon MGETHDR(m, i, t); 443160814Ssimon#undef m_retryhdr 444160814Ssimon#undef m_mballoc_wait 445160814Ssimon 446160814Ssimon if (m != NULL) 447160814Ssimon mbstat.m_wait++; 448160814Ssimon else 449160814Ssimon mbstat.m_drops++; 450194206Ssimon 451194206Ssimon return (m); 452194206Ssimon} 453194206Ssimon 454194206Ssimonstatic void 455194206Ssimonm_reclaim() 456194206Ssimon{ 457194206Ssimon register struct domain *dp; 458194206Ssimon register struct protosw *pr; 459194206Ssimon int s = splimp(); 460194206Ssimon 461194206Ssimon for (dp = domains; dp; dp = dp->dom_next) 462194206Ssimon for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) 463194206Ssimon if (pr->pr_drain) 464194206Ssimon (*pr->pr_drain)(); 465194206Ssimon splx(s); 466194206Ssimon mbstat.m_drain++; 467194206Ssimon} 468194206Ssimon 469194206Ssimon/* 470194206Ssimon * Space allocation routines. 471194206Ssimon * These are also available as macros 472194206Ssimon * for critical paths. 
473194206Ssimon */ 474194206Ssimonstruct mbuf * 475194206Ssimonm_get(how, type) 476194206Ssimon int how, type; 477194206Ssimon{ 478194206Ssimon register struct mbuf *m; 479194206Ssimon 480216166Ssimon MGET(m, how, type); 481194206Ssimon return (m); 482194206Ssimon} 48376866Skris 48476866Skrisstruct mbuf * 48559191Skrism_gethdr(how, type) 48659191Skris int how, type; 48759191Skris{ 48859191Skris register struct mbuf *m; 48959191Skris 49059191Skris MGETHDR(m, how, type); 49159191Skris return (m); 49259191Skris} 49359191Skris 49459191Skrisstruct mbuf * 49559191Skrism_getclr(how, type) 49659191Skris int how, type; 49759191Skris{ 49859191Skris register struct mbuf *m; 49959191Skris 50059191Skris MGET(m, how, type); 50159191Skris if (m == 0) 50259191Skris return (0); 50359191Skris bzero(mtod(m, caddr_t), MLEN); 50459191Skris return (m); 50559191Skris} 50659191Skris 50759191Skrisstruct mbuf * 50859191Skrism_free(m) 50976866Skris struct mbuf *m; 51059191Skris{ 51176866Skris register struct mbuf *n; 51276866Skris 51359191Skris MFREE(m, n); 51459191Skris return (n); 51576866Skris} 51659191Skris 51776866Skrisvoid 51876866Skrism_freem(m) 51976866Skris register struct mbuf *m; 52059191Skris{ 52176866Skris register struct mbuf *n; 522216166Ssimon 52376866Skris if (m == NULL) 52476866Skris return; 52576866Skris do { 526109998Smarkm /* 527109998Smarkm * we do need to check non-first mbuf, since some of existing 528109998Smarkm * code does not call M_PREPEND properly. 529109998Smarkm * (example: call to bpf_mtap from drivers) 530109998Smarkm */ 531109998Smarkm if ((m->m_flags & M_PKTHDR) != 0 && m->m_pkthdr.aux) { 532109998Smarkm m_freem(m->m_pkthdr.aux); 533216166Ssimon m->m_pkthdr.aux = NULL; 534109998Smarkm } 535109998Smarkm MFREE(m, n); 536109998Smarkm m = n; 53789837Skris } while (m); 53876866Skris} 53989837Skris 54076866Skris/* 54176866Skris * Mbuffer utility routines. 
54276866Skris */ 54376866Skris 54476866Skris/* 54576866Skris * Lesser-used path for M_PREPEND: 54676866Skris * allocate new mbuf to prepend to chain, 54776866Skris * copy junk along. 54876866Skris */ 54976866Skrisstruct mbuf * 55076866Skrism_prepend(m, len, how) 55176866Skris register struct mbuf *m; 55276866Skris int len, how; 55376866Skris{ 55476866Skris struct mbuf *mn; 55576866Skris 55676866Skris MGET(mn, how, m->m_type); 55776866Skris if (mn == (struct mbuf *)NULL) { 55876866Skris m_freem(m); 55976866Skris return ((struct mbuf *)NULL); 56076866Skris } 56176866Skris if (m->m_flags & M_PKTHDR) { 562160814Ssimon M_COPY_PKTHDR(mn, m); 56376866Skris m->m_flags &= ~M_PKTHDR; 56476866Skris } 56576866Skris mn->m_next = m; 56676866Skris m = mn; 56776866Skris if (len < MHLEN) 56876866Skris MH_ALIGN(m, len); 56976866Skris m->m_len = len; 57076866Skris return (m); 57176866Skris} 572160814Ssimon 573160814Ssimon/* 574160814Ssimon * Make a copy of an mbuf chain starting "off0" bytes from the beginning, 57576866Skris * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. 576160814Ssimon * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller. 57776866Skris * Note that the copy is read-only, because clusters are not copied, 57876866Skris * only their reference counts are incremented. 
57976866Skris */ 58076866Skris#define MCFail (mbstat.m_mcfail) 58176866Skris 58276866Skrisstruct mbuf * 58376866Skrism_copym(m, off0, len, wait) 58476866Skris register struct mbuf *m; 58576866Skris int off0, wait; 58676866Skris register int len; 58776866Skris{ 58876866Skris register struct mbuf *n, **np; 58976866Skris register int off = off0; 59076866Skris struct mbuf *top; 59176866Skris int copyhdr = 0; 59276866Skris 59376866Skris KASSERT(off >= 0, ("m_copym, negative off %d", off)); 59476866Skris KASSERT(len >= 0, ("m_copym, negative len %d", len)); 59576866Skris if (off == 0 && m->m_flags & M_PKTHDR) 59676866Skris copyhdr = 1; 59776866Skris while (off > 0) { 59876866Skris KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain")); 599111147Snectar if (off < m->m_len) 600111147Snectar break; 601111147Snectar off -= m->m_len; 602111147Snectar m = m->m_next; 603111147Snectar } 604111147Snectar np = ⊤ 605111147Snectar top = 0; 60676866Skris while (len > 0) { 60776866Skris if (m == 0) { 608100936Snectar KASSERT(len == M_COPYALL, 609100936Snectar ("m_copym, length > size of mbuf chain")); 610100936Snectar break; 611100936Snectar } 612100936Snectar MGET(n, wait, m->m_type); 613100936Snectar *np = n; 614100936Snectar if (n == 0) 615100936Snectar goto nospace; 616100936Snectar if (copyhdr) { 617100936Snectar M_COPY_PKTHDR(n, m); 618100936Snectar if (len == M_COPYALL) 619100936Snectar n->m_pkthdr.len -= off0; 620100936Snectar else 621100936Snectar n->m_pkthdr.len = len; 622100936Snectar copyhdr = 0; 623100936Snectar } 624100936Snectar n->m_len = min(len, m->m_len - off); 625100936Snectar if (m->m_flags & M_EXT) { 626100936Snectar n->m_data = m->m_data + off; 627100936Snectar if(!m->m_ext.ext_ref) 628100936Snectar mclrefcnt[mtocl(m->m_ext.ext_buf)]++; 629100936Snectar else 630100936Snectar (*(m->m_ext.ext_ref))(m->m_ext.ext_buf, 631100936Snectar m->m_ext.ext_size); 632100936Snectar n->m_ext = m->m_ext; 633100936Snectar n->m_flags |= M_EXT; 634100936Snectar } else 
635100936Snectar bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 636100936Snectar (unsigned)n->m_len); 637100936Snectar if (len != M_COPYALL) 638100936Snectar len -= n->m_len; 639100936Snectar off = 0; 640100936Snectar m = m->m_next; 641109998Smarkm np = &n->m_next; 642109998Smarkm } 643109998Smarkm if (top == 0) 644109998Smarkm MCFail++; 645109998Smarkm return (top); 646109998Smarkmnospace: 647109998Smarkm m_freem(top); 648109998Smarkm MCFail++; 649109998Smarkm return (0); 650109998Smarkm} 651109998Smarkm 652109998Smarkm/* 653100936Snectar * Copy an entire packet, including header (which must be present). 654100936Snectar * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'. 655100936Snectar * Note that the copy is read-only, because clusters are not copied, 656100936Snectar * only their reference counts are incremented. 657100936Snectar */ 658100936Snectarstruct mbuf * 659100936Snectarm_copypacket(m, how) 660100936Snectar struct mbuf *m; 661100936Snectar int how; 662100936Snectar{ 663100936Snectar struct mbuf *top, *n, *o; 664100936Snectar 665100936Snectar MGET(n, how, m->m_type); 666109998Smarkm top = n; 667109998Smarkm if (!n) 668109998Smarkm goto nospace; 669109998Smarkm 670109998Smarkm M_COPY_PKTHDR(n, m); 671109998Smarkm n->m_len = m->m_len; 672109998Smarkm if (m->m_flags & M_EXT) { 673109998Smarkm n->m_data = m->m_data; 674109998Smarkm if(!m->m_ext.ext_ref) 675109998Smarkm mclrefcnt[mtocl(m->m_ext.ext_buf)]++; 676109998Smarkm else 677109998Smarkm (*(m->m_ext.ext_ref))(m->m_ext.ext_buf, 678109998Smarkm m->m_ext.ext_size); 679109998Smarkm n->m_ext = m->m_ext; 680109998Smarkm n->m_flags |= M_EXT; 681109998Smarkm } else { 682109998Smarkm bcopy(mtod(m, char *), mtod(n, char *), n->m_len); 683109998Smarkm } 684109998Smarkm 685109998Smarkm m = m->m_next; 686109998Smarkm while (m) { 687109998Smarkm MGET(o, how, m->m_type); 688109998Smarkm if (!o) 689111147Snectar goto nospace; 690111147Snectar 691109998Smarkm n->m_next = o; 692111147Snectar n = 
n->m_next; 693111147Snectar 694111147Snectar n->m_len = m->m_len; 695111147Snectar if (m->m_flags & M_EXT) { 696109998Smarkm n->m_data = m->m_data; 697160814Ssimon if(!m->m_ext.ext_ref) 698160814Ssimon mclrefcnt[mtocl(m->m_ext.ext_buf)]++; 699160814Ssimon else 700160814Ssimon (*(m->m_ext.ext_ref))(m->m_ext.ext_buf, 701160814Ssimon m->m_ext.ext_size); 702160814Ssimon n->m_ext = m->m_ext; 703160814Ssimon n->m_flags |= M_EXT; 704160814Ssimon } else { 705160814Ssimon bcopy(mtod(m, char *), mtod(n, char *), n->m_len); 706160814Ssimon } 707160814Ssimon 708160814Ssimon m = m->m_next; 709160814Ssimon } 710160814Ssimon return top; 711160814Ssimonnospace: 712160814Ssimon m_freem(top); 713160814Ssimon MCFail++; 714160814Ssimon return 0; 715160814Ssimon} 716160814Ssimon 717160814Ssimon/* 718160814Ssimon * Copy data from an mbuf chain starting "off" bytes from the beginning, 719160814Ssimon * continuing for "len" bytes, into the indicated buffer. 720160814Ssimon */ 721160814Ssimonvoid 722160814Ssimonm_copydata(m, off, len, cp) 723160814Ssimon register struct mbuf *m; 724160814Ssimon register int off; 725160814Ssimon register int len; 726216166Ssimon caddr_t cp; 727216166Ssimon{ 728216166Ssimon register unsigned count; 729216166Ssimon 730216166Ssimon KASSERT(off >= 0, ("m_copydata, negative off %d", off)); 731216166Ssimon KASSERT(len >= 0, ("m_copydata, negative len %d", len)); 732216166Ssimon while (off > 0) { 733216166Ssimon KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain")); 734216166Ssimon if (off < m->m_len) 735216166Ssimon break; 736216166Ssimon off -= m->m_len; 737216166Ssimon m = m->m_next; 738216166Ssimon } 739216166Ssimon while (len > 0) { 740216166Ssimon KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain")); 741216166Ssimon count = min(m->m_len - off, len); 742216166Ssimon bcopy(mtod(m, caddr_t) + off, cp, count); 743216166Ssimon len -= count; 744216166Ssimon cp += count; 745216166Ssimon off = 0; 746216166Ssimon m = m->m_next; 747216166Ssimon } 
748216166Ssimon} 749216166Ssimon 750216166Ssimon/* 751216166Ssimon * Copy a packet header mbuf chain into a completely new chain, including 752216166Ssimon * copying any mbuf clusters. Use this instead of m_copypacket() when 753216166Ssimon * you need a writable copy of an mbuf chain. 754216166Ssimon */ 755216166Ssimonstruct mbuf * 756216166Ssimonm_dup(m, how) 757216166Ssimon struct mbuf *m; 758216166Ssimon int how; 759216166Ssimon{ 760216166Ssimon struct mbuf **p, *top = NULL; 761216166Ssimon int remain, moff, nsize; 762216166Ssimon 763216166Ssimon /* Sanity check */ 764216166Ssimon if (m == NULL) 765216166Ssimon return (0); 766279264Sdelphij KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __FUNCTION__)); 767279264Sdelphij 768279264Sdelphij /* While there's more data, get a new mbuf, tack it on, and fill it */ 76976866Skris remain = m->m_pkthdr.len; 77076866Skris moff = 0; 77176866Skris p = ⊤ 77276866Skris while (remain > 0 || top == NULL) { /* allow m->m_pkthdr.len == 0 */ 77376866Skris struct mbuf *n; 77476866Skris 77576866Skris /* Get the next new mbuf */ 77676866Skris MGET(n, how, m->m_type); 77776866Skris if (n == NULL) 77876866Skris goto nospace; 77976866Skris if (top == NULL) { /* first one, must be PKTHDR */ 780162911Ssimon M_COPY_PKTHDR(n, m); 781215697Ssimon nsize = MHLEN; 782215697Ssimon } else /* not the first one */ 783215697Ssimon nsize = MLEN; 784215697Ssimon if (remain >= MINCLSIZE) { 78576866Skris MCLGET(n, how); 78659191Skris if ((n->m_flags & M_EXT) == 0) { 78759191Skris (void)m_free(n); 78889837Skris goto nospace; 78989837Skris } 79089837Skris nsize = MCLBYTES; 79189837Skris } 79259191Skris n->m_len = 0; 79389837Skris 79489837Skris /* Link it into the new chain */ 79589837Skris *p = n; 79689837Skris p = &n->m_next; 79789837Skris 79859191Skris /* Copy data from original mbuf(s) into new mbuf */ 79989837Skris while (n->m_len < nsize && m != NULL) { 80089837Skris int chunk = min(nsize - n->m_len, m->m_len - moff); 80189837Skris 80289837Skris 
			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
			moff += chunk;
			n->m_len += chunk;
			remain -= chunk;
			/* Source mbuf exhausted; advance to the next one. */
			if (moff == m->m_len) {
				m = m->m_next;
				moff = 0;
			}
		}

		/* Check correct total mbuf length */
		KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
		    ("%s: bogus m_pkthdr.len", __FUNCTION__));
	}
	return (top);

nospace:
	m_freem(top);
	MCFail++;
	return (0);
}

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 *
 * Data from n is compacted into the tail mbuf of m where it fits;
 * once it no longer fits (or the tail uses external storage), the
 * remainder of n is simply linked on.  Mbufs of n that are drained
 * by the copy are freed here.
 */
void
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		/*
		 * Cannot write into external storage, and cannot write
		 * past the end of the internal data area: just join the
		 * two chains.
		 */
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim req_len bytes of data from the mbuf chain rooted at mp:
 * from the head of the chain if req_len is positive, from the tail
 * if it is negative.  If the first mbuf carries a packet header,
 * m_pkthdr.len is adjusted to match the bytes actually removed.
 * Trimmed mbufs are not freed; their m_len is set to 0.
 */
void
m_adj(mp, req_len)
	struct mbuf *mp;
	int req_len;
{
	register int len = req_len;
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/*
		 * Trim from head.
		 */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				/* Whole mbuf consumed; keep going. */
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				/* Partial trim: shift the data pointer. */
				m->m_len -= len;
				m->m_data += len;
				len = 0;
			}
		}
		m = mp;
		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			/* Fast path: last mbuf absorbs the whole trim. */
			m->m_len -= len;
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;
			return;
		}
		count -= len;
		if (count < 0)
			count = 0;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		m = mp;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero out everything past the new last-data mbuf. */
		while (m->m_next)
			(m = m->m_next)->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 *
 * NOTE(review): on failure the entire chain is freed (see "bad:") and
 * MPFail is bumped, so callers must not reuse their mbuf pointer after
 * a NULL return.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if ((n->m_flags & M_EXT) == 0 &&
	    n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
		if (n->m_len >= len)
			return (n);
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		/* Request larger than one mbuf can ever hold. */
		if (len > MHLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
		/* Move the packet header to the new lead mbuf. */
		if (n->m_flags & M_PKTHDR) {
			M_COPY_PKTHDR(m, n);
			n->m_flags &= ~M_PKTHDR;
		}
	}
	space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
	do {
		/*
		 * Copy as much as requested (padded up toward
		 * max_protohdr), bounded by available space and by what
		 * the current source mbuf holds.
		 */
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		space -= count;
		if (n->m_len)
			n->m_data += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		/* Ran out of data before satisfying the request. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	MPFail++;
	return (0);
}

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
	register struct mbuf *m0;
	int len0, wait;
{
	register struct mbuf *m, *n;
	unsigned len = len0, remain;

	/* Walk to the mbuf containing the split point. */
	for (m = m0; m && len > m->m_len; m = m->m_next)
		len -= m->m_len;
	if (m == 0)
		return (0);
	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		/* The tail becomes a packet in its own right. */
		MGETHDR(n, wait, m0->m_type);
		if (n == 0)
			return (0);
		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)
			goto extpacket;
		if (remain > MHLEN) {
			/* m can't be the lead packet */
			MH_ALIGN(n, 0);
			n->m_next = m_split(m, len, wait);
			if (n->m_next == 0) {
				(void) m_free(n);
				return (0);
			} else
				return (n);
		} else
			MH_ALIGN(n, remain);
	} else if (remain == 0) {
		/* Split falls exactly on an mbuf boundary: just unlink. */
		n = m->m_next;
		m->m_next = 0;
		return (n);
	} else {
		MGET(n, wait, m->m_type);
		if (n == 0)
			return (0);
		M_ALIGN(n, remain);
	}
extpacket:
	if (m->m_flags & M_EXT) {
		/*
		 * Share the external storage between both halves,
		 * bumping its reference count rather than copying.
		 */
		n->m_flags |= M_EXT;
		n->m_ext = m->m_ext;
		if(!m->m_ext.ext_ref)
			mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
		else
			(*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
			    m->m_ext.ext_size);
		m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
		n->m_data = m->m_data + len;
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_len = remain;
	m->m_len = len;
	n->m_next = m->m_next;
	m->m_next = 0;
	return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 *
 * Builds a packet-header mbuf chain of totlen bytes read from buf,
 * using the supplied copy routine if given (bcopy otherwise) and
 * tagging the chain with the receiving interface ifp.
 * NOTE(review): a non-zero off0 skips "off + 2 * sizeof(u_short)"
 * bytes and wraps cp back to buf at epkt -- this looks like the old
 * trailer-encapsulation layout; confirm against remaining callers.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
	char *buf;
	int totlen, off0;
	struct ifnet *ifp;
	void (*copy) __P((char *from, caddr_t to, u_int len));
{
	register struct mbuf *m;
	struct mbuf *top = 0, **mp = &top;
	register int off = off0, len;
	register char *cp;
	char *epkt;

	cp = buf;
	epkt = cp + totlen;
	if (off) {
		cp += off + 2 * sizeof(u_short);
		totlen -= 2 * sizeof(u_short);
	}
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				return (0);
			}
			m->m_len = MLEN;
		}
		len = min(totlen, epkt - cp);
		if (len >= MINCLSIZE) {
			/* Large chunk: try to use a cluster. */
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				m->m_len = len = min(len, MCLBYTES);
			else
				len = m->m_len;
		} else {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (len < m->m_len) {
				if (top == 0 && len + max_linkhdr <= m->m_len)
					m->m_data += max_linkhdr;
				m->m_len = len;
			} else
				len = m->m_len;
		}
		if (copy)
			copy(cp, mtod(m, caddr_t), (unsigned)len);
		else
			bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
		if (cp == epkt)
			cp = buf;
	}
	return (top);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 *
 * NOTE(review): extension mbufs are allocated with M_DONTWAIT; on
 * allocation failure the copy is silently truncated (goto out /
 * break below), and the caller gets no error indication.
 */
void
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	/* Seek to the mbuf containing offset "off", extending as needed. */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min (m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * Debugging aid: hex-dump each mbuf of a packet chain to the console.
 *
 * NOTE(review): m->m_pkthdr.len is read unconditionally, so m must be
 * the head of a chain with a valid packet header; the loop ends only
 * when len reaches exactly 0, so an inconsistent m_pkthdr.len would
 * walk off the end of the chain -- confirm callers guarantee this.
 */
void
m_print(const struct mbuf *m)
{
	int len;
	const struct mbuf *m2;

	len = m->m_pkthdr.len;
	m2 = m;
	while (len) {
		printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
		len -= m2->m_len;
		m2 = m2->m_next;
	}
	return;
}