/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/map.h>
#define MBTYPES
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/domain.h>
#include <sys/protosw.h>

#include <vm/vm.h>
|
| 50void m_reclaim __P(()); 51
|
50extern vm_map_t mb_map; 51struct mbuf *mbutl; 52char *mclrefcnt; 53
| 52extern vm_map_t mb_map; 53struct mbuf *mbutl; 54char *mclrefcnt; 55
|
| 56void
|
54mbinit() 55{ 56 int s; 57 58#if CLBYTES < 4096 59#define NCL_INIT (4096/CLBYTES) 60#else 61#define NCL_INIT 1 62#endif 63 s = splimp(); 64 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0) 65 goto bad; 66 splx(s); 67 return; 68bad: 69 panic("mbinit"); 70} 71 72/* 73 * Allocate some number of mbuf clusters 74 * and place on cluster free list. 75 * Must be called at splimp. 76 */ 77/* ARGSUSED */
| 57mbinit() 58{ 59 int s; 60 61#if CLBYTES < 4096 62#define NCL_INIT (4096/CLBYTES) 63#else 64#define NCL_INIT 1 65#endif 66 s = splimp(); 67 if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0) 68 goto bad; 69 splx(s); 70 return; 71bad: 72 panic("mbinit"); 73} 74 75/* 76 * Allocate some number of mbuf clusters 77 * and place on cluster free list. 78 * Must be called at splimp. 79 */ 80/* ARGSUSED */
|
| 81int
|
78m_clalloc(ncl, nowait) 79 register int ncl; 80 int nowait; 81{ 82 static int logged; 83 register caddr_t p; 84 register int i; 85 int npg; 86 87 npg = ncl * CLSIZE; 88 p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait); 89 if (p == NULL) { 90 if (logged == 0) { 91 logged++; 92 log(LOG_ERR, "mb_map full\n"); 93 } 94 return (0); 95 } 96 ncl = ncl * CLBYTES / MCLBYTES; 97 for (i = 0; i < ncl; i++) { 98 ((union mcluster *)p)->mcl_next = mclfree; 99 mclfree = (union mcluster *)p; 100 p += MCLBYTES; 101 mbstat.m_clfree++; 102 } 103 mbstat.m_clusters += ncl; 104 return (1); 105} 106 107/* 108 * When MGET failes, ask protocols to free space when short of memory, 109 * then re-attempt to allocate an mbuf. 110 */ 111struct mbuf * 112m_retry(i, t) 113 int i, t; 114{ 115 register struct mbuf *m; 116 117 m_reclaim(); 118#define m_retry(i, t) (struct mbuf *)0 119 MGET(m, i, t); 120#undef m_retry 121 return (m); 122} 123 124/* 125 * As above; retry an MGETHDR. 126 */ 127struct mbuf * 128m_retryhdr(i, t) 129 int i, t; 130{ 131 register struct mbuf *m; 132 133 m_reclaim(); 134#define m_retryhdr(i, t) (struct mbuf *)0 135 MGETHDR(m, i, t); 136#undef m_retryhdr 137 return (m); 138} 139
| 82m_clalloc(ncl, nowait) 83 register int ncl; 84 int nowait; 85{ 86 static int logged; 87 register caddr_t p; 88 register int i; 89 int npg; 90 91 npg = ncl * CLSIZE; 92 p = (caddr_t)kmem_malloc(mb_map, ctob(npg), !nowait); 93 if (p == NULL) { 94 if (logged == 0) { 95 logged++; 96 log(LOG_ERR, "mb_map full\n"); 97 } 98 return (0); 99 } 100 ncl = ncl * CLBYTES / MCLBYTES; 101 for (i = 0; i < ncl; i++) { 102 ((union mcluster *)p)->mcl_next = mclfree; 103 mclfree = (union mcluster *)p; 104 p += MCLBYTES; 105 mbstat.m_clfree++; 106 } 107 mbstat.m_clusters += ncl; 108 return (1); 109} 110 111/* 112 * When MGET failes, ask protocols to free space when short of memory, 113 * then re-attempt to allocate an mbuf. 114 */ 115struct mbuf * 116m_retry(i, t) 117 int i, t; 118{ 119 register struct mbuf *m; 120 121 m_reclaim(); 122#define m_retry(i, t) (struct mbuf *)0 123 MGET(m, i, t); 124#undef m_retry 125 return (m); 126} 127 128/* 129 * As above; retry an MGETHDR. 130 */ 131struct mbuf * 132m_retryhdr(i, t) 133 int i, t; 134{ 135 register struct mbuf *m; 136 137 m_reclaim(); 138#define m_retryhdr(i, t) (struct mbuf *)0 139 MGETHDR(m, i, t); 140#undef m_retryhdr 141 return (m); 142} 143
|
| 144void
|
140m_reclaim() 141{ 142 register struct domain *dp; 143 register struct protosw *pr; 144 int s = splimp(); 145 146 for (dp = domains; dp; dp = dp->dom_next) 147 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) 148 if (pr->pr_drain) 149 (*pr->pr_drain)(); 150 splx(s); 151 mbstat.m_drain++; 152} 153 154/* 155 * Space allocation routines. 156 * These are also available as macros 157 * for critical paths. 158 */ 159struct mbuf * 160m_get(nowait, type) 161 int nowait, type; 162{ 163 register struct mbuf *m; 164 165 MGET(m, nowait, type); 166 return (m); 167} 168 169struct mbuf * 170m_gethdr(nowait, type) 171 int nowait, type; 172{ 173 register struct mbuf *m; 174 175 MGETHDR(m, nowait, type); 176 return (m); 177} 178 179struct mbuf * 180m_getclr(nowait, type) 181 int nowait, type; 182{ 183 register struct mbuf *m; 184 185 MGET(m, nowait, type); 186 if (m == 0) 187 return (0); 188 bzero(mtod(m, caddr_t), MLEN); 189 return (m); 190} 191 192struct mbuf * 193m_free(m) 194 struct mbuf *m; 195{ 196 register struct mbuf *n; 197 198 MFREE(m, n); 199 return (n); 200} 201 202void 203m_freem(m) 204 register struct mbuf *m; 205{ 206 register struct mbuf *n; 207 208 if (m == NULL) 209 return; 210 do { 211 MFREE(m, n); 212 } while (m = n); 213} 214 215/* 216 * Mbuffer utility routines. 217 */ 218 219/* 220 * Lesser-used path for M_PREPEND: 221 * allocate new mbuf to prepend to chain, 222 * copy junk along. 
223 */ 224struct mbuf * 225m_prepend(m, len, how) 226 register struct mbuf *m; 227 int len, how; 228{ 229 struct mbuf *mn; 230 231 MGET(mn, how, m->m_type); 232 if (mn == (struct mbuf *)NULL) { 233 m_freem(m); 234 return ((struct mbuf *)NULL); 235 } 236 if (m->m_flags & M_PKTHDR) { 237 M_COPY_PKTHDR(mn, m); 238 m->m_flags &= ~M_PKTHDR; 239 } 240 mn->m_next = m; 241 m = mn; 242 if (len < MHLEN) 243 MH_ALIGN(m, len); 244 m->m_len = len; 245 return (m); 246} 247 248/* 249 * Make a copy of an mbuf chain starting "off0" bytes from the beginning, 250 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. 251 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller. 252 */ 253int MCFail; 254 255struct mbuf * 256m_copym(m, off0, len, wait) 257 register struct mbuf *m; 258 int off0, wait; 259 register int len; 260{ 261 register struct mbuf *n, **np; 262 register int off = off0; 263 struct mbuf *top; 264 int copyhdr = 0; 265 266 if (off < 0 || len < 0) 267 panic("m_copym"); 268 if (off == 0 && m->m_flags & M_PKTHDR) 269 copyhdr = 1; 270 while (off > 0) { 271 if (m == 0) 272 panic("m_copym"); 273 if (off < m->m_len) 274 break; 275 off -= m->m_len; 276 m = m->m_next; 277 } 278 np = ⊤ 279 top = 0; 280 while (len > 0) { 281 if (m == 0) { 282 if (len != M_COPYALL) 283 panic("m_copym"); 284 break; 285 } 286 MGET(n, wait, m->m_type); 287 *np = n; 288 if (n == 0) 289 goto nospace; 290 if (copyhdr) { 291 M_COPY_PKTHDR(n, m); 292 if (len == M_COPYALL) 293 n->m_pkthdr.len -= off0; 294 else 295 n->m_pkthdr.len = len; 296 copyhdr = 0; 297 } 298 n->m_len = min(len, m->m_len - off); 299 if (m->m_flags & M_EXT) { 300 n->m_data = m->m_data + off; 301 mclrefcnt[mtocl(m->m_ext.ext_buf)]++; 302 n->m_ext = m->m_ext; 303 n->m_flags |= M_EXT; 304 } else 305 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 306 (unsigned)n->m_len); 307 if (len != M_COPYALL) 308 len -= n->m_len; 309 off = 0; 310 m = m->m_next; 311 np = &n->m_next; 312 } 313 if (top == 0) 314 MCFail++; 315 
return (top); 316nospace: 317 m_freem(top); 318 MCFail++; 319 return (0); 320} 321 322/* 323 * Copy data from an mbuf chain starting "off" bytes from the beginning, 324 * continuing for "len" bytes, into the indicated buffer. 325 */
| 145m_reclaim() 146{ 147 register struct domain *dp; 148 register struct protosw *pr; 149 int s = splimp(); 150 151 for (dp = domains; dp; dp = dp->dom_next) 152 for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) 153 if (pr->pr_drain) 154 (*pr->pr_drain)(); 155 splx(s); 156 mbstat.m_drain++; 157} 158 159/* 160 * Space allocation routines. 161 * These are also available as macros 162 * for critical paths. 163 */ 164struct mbuf * 165m_get(nowait, type) 166 int nowait, type; 167{ 168 register struct mbuf *m; 169 170 MGET(m, nowait, type); 171 return (m); 172} 173 174struct mbuf * 175m_gethdr(nowait, type) 176 int nowait, type; 177{ 178 register struct mbuf *m; 179 180 MGETHDR(m, nowait, type); 181 return (m); 182} 183 184struct mbuf * 185m_getclr(nowait, type) 186 int nowait, type; 187{ 188 register struct mbuf *m; 189 190 MGET(m, nowait, type); 191 if (m == 0) 192 return (0); 193 bzero(mtod(m, caddr_t), MLEN); 194 return (m); 195} 196 197struct mbuf * 198m_free(m) 199 struct mbuf *m; 200{ 201 register struct mbuf *n; 202 203 MFREE(m, n); 204 return (n); 205} 206 207void 208m_freem(m) 209 register struct mbuf *m; 210{ 211 register struct mbuf *n; 212 213 if (m == NULL) 214 return; 215 do { 216 MFREE(m, n); 217 } while (m = n); 218} 219 220/* 221 * Mbuffer utility routines. 222 */ 223 224/* 225 * Lesser-used path for M_PREPEND: 226 * allocate new mbuf to prepend to chain, 227 * copy junk along. 
228 */ 229struct mbuf * 230m_prepend(m, len, how) 231 register struct mbuf *m; 232 int len, how; 233{ 234 struct mbuf *mn; 235 236 MGET(mn, how, m->m_type); 237 if (mn == (struct mbuf *)NULL) { 238 m_freem(m); 239 return ((struct mbuf *)NULL); 240 } 241 if (m->m_flags & M_PKTHDR) { 242 M_COPY_PKTHDR(mn, m); 243 m->m_flags &= ~M_PKTHDR; 244 } 245 mn->m_next = m; 246 m = mn; 247 if (len < MHLEN) 248 MH_ALIGN(m, len); 249 m->m_len = len; 250 return (m); 251} 252 253/* 254 * Make a copy of an mbuf chain starting "off0" bytes from the beginning, 255 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. 256 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller. 257 */ 258int MCFail; 259 260struct mbuf * 261m_copym(m, off0, len, wait) 262 register struct mbuf *m; 263 int off0, wait; 264 register int len; 265{ 266 register struct mbuf *n, **np; 267 register int off = off0; 268 struct mbuf *top; 269 int copyhdr = 0; 270 271 if (off < 0 || len < 0) 272 panic("m_copym"); 273 if (off == 0 && m->m_flags & M_PKTHDR) 274 copyhdr = 1; 275 while (off > 0) { 276 if (m == 0) 277 panic("m_copym"); 278 if (off < m->m_len) 279 break; 280 off -= m->m_len; 281 m = m->m_next; 282 } 283 np = ⊤ 284 top = 0; 285 while (len > 0) { 286 if (m == 0) { 287 if (len != M_COPYALL) 288 panic("m_copym"); 289 break; 290 } 291 MGET(n, wait, m->m_type); 292 *np = n; 293 if (n == 0) 294 goto nospace; 295 if (copyhdr) { 296 M_COPY_PKTHDR(n, m); 297 if (len == M_COPYALL) 298 n->m_pkthdr.len -= off0; 299 else 300 n->m_pkthdr.len = len; 301 copyhdr = 0; 302 } 303 n->m_len = min(len, m->m_len - off); 304 if (m->m_flags & M_EXT) { 305 n->m_data = m->m_data + off; 306 mclrefcnt[mtocl(m->m_ext.ext_buf)]++; 307 n->m_ext = m->m_ext; 308 n->m_flags |= M_EXT; 309 } else 310 bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), 311 (unsigned)n->m_len); 312 if (len != M_COPYALL) 313 len -= n->m_len; 314 off = 0; 315 m = m->m_next; 316 np = &n->m_next; 317 } 318 if (top == 0) 319 MCFail++; 320 
return (top); 321nospace: 322 m_freem(top); 323 MCFail++; 324 return (0); 325} 326 327/* 328 * Copy data from an mbuf chain starting "off" bytes from the beginning, 329 * continuing for "len" bytes, into the indicated buffer. 330 */
|
| 331void
|
326m_copydata(m, off, len, cp) 327 register struct mbuf *m; 328 register int off; 329 register int len; 330 caddr_t cp; 331{ 332 register unsigned count; 333 334 if (off < 0 || len < 0) 335 panic("m_copydata"); 336 while (off > 0) { 337 if (m == 0) 338 panic("m_copydata"); 339 if (off < m->m_len) 340 break; 341 off -= m->m_len; 342 m = m->m_next; 343 } 344 while (len > 0) { 345 if (m == 0) 346 panic("m_copydata"); 347 count = min(m->m_len - off, len); 348 bcopy(mtod(m, caddr_t) + off, cp, count); 349 len -= count; 350 cp += count; 351 off = 0; 352 m = m->m_next; 353 } 354} 355 356/* 357 * Concatenate mbuf chain n to m. 358 * Both chains must be of the same type (e.g. MT_DATA). 359 * Any m_pkthdr is not updated. 360 */
| 332m_copydata(m, off, len, cp) 333 register struct mbuf *m; 334 register int off; 335 register int len; 336 caddr_t cp; 337{ 338 register unsigned count; 339 340 if (off < 0 || len < 0) 341 panic("m_copydata"); 342 while (off > 0) { 343 if (m == 0) 344 panic("m_copydata"); 345 if (off < m->m_len) 346 break; 347 off -= m->m_len; 348 m = m->m_next; 349 } 350 while (len > 0) { 351 if (m == 0) 352 panic("m_copydata"); 353 count = min(m->m_len - off, len); 354 bcopy(mtod(m, caddr_t) + off, cp, count); 355 len -= count; 356 cp += count; 357 off = 0; 358 m = m->m_next; 359 } 360} 361 362/* 363 * Concatenate mbuf chain n to m. 364 * Both chains must be of the same type (e.g. MT_DATA). 365 * Any m_pkthdr is not updated. 366 */
|
| 367void
|
361m_cat(m, n) 362 register struct mbuf *m, *n; 363{ 364 while (m->m_next) 365 m = m->m_next; 366 while (n) { 367 if (m->m_flags & M_EXT || 368 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { 369 /* just join the two chains */ 370 m->m_next = n; 371 return; 372 } 373 /* splat the data from one into the other */ 374 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 375 (u_int)n->m_len); 376 m->m_len += n->m_len; 377 n = m_free(n); 378 } 379} 380
| 368m_cat(m, n) 369 register struct mbuf *m, *n; 370{ 371 while (m->m_next) 372 m = m->m_next; 373 while (n) { 374 if (m->m_flags & M_EXT || 375 m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { 376 /* just join the two chains */ 377 m->m_next = n; 378 return; 379 } 380 /* splat the data from one into the other */ 381 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 382 (u_int)n->m_len); 383 m->m_len += n->m_len; 384 n = m_free(n); 385 } 386} 387
|
| 388void
|
381m_adj(mp, req_len) 382 struct mbuf *mp; 383 int req_len; 384{ 385 register int len = req_len; 386 register struct mbuf *m; 387 register count; 388 389 if ((m = mp) == NULL) 390 return; 391 if (len >= 0) { 392 /* 393 * Trim from head. 394 */ 395 while (m != NULL && len > 0) { 396 if (m->m_len <= len) { 397 len -= m->m_len; 398 m->m_len = 0; 399 m = m->m_next; 400 } else { 401 m->m_len -= len; 402 m->m_data += len; 403 len = 0; 404 } 405 } 406 m = mp; 407 if (mp->m_flags & M_PKTHDR) 408 m->m_pkthdr.len -= (req_len - len); 409 } else { 410 /* 411 * Trim from tail. Scan the mbuf chain, 412 * calculating its length and finding the last mbuf. 413 * If the adjustment only affects this mbuf, then just 414 * adjust and return. Otherwise, rescan and truncate 415 * after the remaining size. 416 */ 417 len = -len; 418 count = 0; 419 for (;;) { 420 count += m->m_len; 421 if (m->m_next == (struct mbuf *)0) 422 break; 423 m = m->m_next; 424 } 425 if (m->m_len >= len) { 426 m->m_len -= len; 427 if (mp->m_flags & M_PKTHDR) 428 mp->m_pkthdr.len -= len; 429 return; 430 } 431 count -= len; 432 if (count < 0) 433 count = 0; 434 /* 435 * Correct length for chain is "count". 436 * Find the mbuf with last data, adjust its length, 437 * and toss data from remaining mbufs on chain. 438 */ 439 m = mp; 440 if (m->m_flags & M_PKTHDR) 441 m->m_pkthdr.len = count; 442 for (; m; m = m->m_next) { 443 if (m->m_len >= count) { 444 m->m_len = count; 445 break; 446 } 447 count -= m->m_len; 448 } 449 while (m = m->m_next) 450 m->m_len = 0; 451 } 452} 453 454/* 455 * Rearange an mbuf chain so that len bytes are contiguous 456 * and in the data area of an mbuf (so that mtod and dtom 457 * will work for a structure of size len). Returns the resulting 458 * mbuf chain on success, frees it and returns null on failure. 459 * If there is room, it will add up to max_protohdr-len extra bytes to the 460 * contiguous region in an attempt to avoid being called next time. 
461 */ 462int MPFail; 463 464struct mbuf * 465m_pullup(n, len) 466 register struct mbuf *n; 467 int len; 468{ 469 register struct mbuf *m; 470 register int count; 471 int space; 472 473 /* 474 * If first mbuf has no cluster, and has room for len bytes 475 * without shifting current data, pullup into it, 476 * otherwise allocate a new mbuf to prepend to the chain. 477 */ 478 if ((n->m_flags & M_EXT) == 0 && 479 n->m_data + len < &n->m_dat[MLEN] && n->m_next) { 480 if (n->m_len >= len) 481 return (n); 482 m = n; 483 n = n->m_next; 484 len -= m->m_len; 485 } else { 486 if (len > MHLEN) 487 goto bad; 488 MGET(m, M_DONTWAIT, n->m_type); 489 if (m == 0) 490 goto bad; 491 m->m_len = 0; 492 if (n->m_flags & M_PKTHDR) { 493 M_COPY_PKTHDR(m, n); 494 n->m_flags &= ~M_PKTHDR; 495 } 496 } 497 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 498 do { 499 count = min(min(max(len, max_protohdr), space), n->m_len); 500 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 501 (unsigned)count); 502 len -= count; 503 m->m_len += count; 504 n->m_len -= count; 505 space -= count; 506 if (n->m_len) 507 n->m_data += count; 508 else 509 n = m_free(n); 510 } while (len > 0 && n); 511 if (len > 0) { 512 (void) m_free(m); 513 goto bad; 514 } 515 m->m_next = n; 516 return (m); 517bad: 518 m_freem(n); 519 MPFail++; 520 return (0); 521} 522 523/* 524 * Partition an mbuf chain in two pieces, returning the tail -- 525 * all but the first len0 bytes. In case of failure, it returns NULL and 526 * attempts to restore the chain to its original state. 
527 */ 528struct mbuf * 529m_split(m0, len0, wait) 530 register struct mbuf *m0; 531 int len0, wait; 532{ 533 register struct mbuf *m, *n; 534 unsigned len = len0, remain; 535 536 for (m = m0; m && len > m->m_len; m = m->m_next) 537 len -= m->m_len; 538 if (m == 0) 539 return (0); 540 remain = m->m_len - len; 541 if (m0->m_flags & M_PKTHDR) { 542 MGETHDR(n, wait, m0->m_type); 543 if (n == 0) 544 return (0); 545 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; 546 n->m_pkthdr.len = m0->m_pkthdr.len - len0; 547 m0->m_pkthdr.len = len0; 548 if (m->m_flags & M_EXT) 549 goto extpacket; 550 if (remain > MHLEN) { 551 /* m can't be the lead packet */ 552 MH_ALIGN(n, 0); 553 n->m_next = m_split(m, len, wait); 554 if (n->m_next == 0) { 555 (void) m_free(n); 556 return (0); 557 } else 558 return (n); 559 } else 560 MH_ALIGN(n, remain); 561 } else if (remain == 0) { 562 n = m->m_next; 563 m->m_next = 0; 564 return (n); 565 } else { 566 MGET(n, wait, m->m_type); 567 if (n == 0) 568 return (0); 569 M_ALIGN(n, remain); 570 } 571extpacket: 572 if (m->m_flags & M_EXT) { 573 n->m_flags |= M_EXT; 574 n->m_ext = m->m_ext; 575 mclrefcnt[mtocl(m->m_ext.ext_buf)]++; 576 m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */ 577 n->m_data = m->m_data + len; 578 } else { 579 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); 580 } 581 n->m_len = remain; 582 m->m_len = len; 583 n->m_next = m->m_next; 584 m->m_next = 0; 585 return (n); 586} 587/* 588 * Routine to copy from device local memory into mbufs. 
589 */ 590struct mbuf * 591m_devget(buf, totlen, off0, ifp, copy) 592 char *buf; 593 int totlen, off0; 594 struct ifnet *ifp; 595 void (*copy)(); 596{ 597 register struct mbuf *m; 598 struct mbuf *top = 0, **mp = ⊤ 599 register int off = off0, len; 600 register char *cp; 601 char *epkt; 602 603 cp = buf; 604 epkt = cp + totlen; 605 if (off) { 606 cp += off + 2 * sizeof(u_short); 607 totlen -= 2 * sizeof(u_short); 608 } 609 MGETHDR(m, M_DONTWAIT, MT_DATA); 610 if (m == 0) 611 return (0); 612 m->m_pkthdr.rcvif = ifp; 613 m->m_pkthdr.len = totlen; 614 m->m_len = MHLEN; 615 616 while (totlen > 0) { 617 if (top) { 618 MGET(m, M_DONTWAIT, MT_DATA); 619 if (m == 0) { 620 m_freem(top); 621 return (0); 622 } 623 m->m_len = MLEN; 624 } 625 len = min(totlen, epkt - cp); 626 if (len >= MINCLSIZE) { 627 MCLGET(m, M_DONTWAIT); 628 if (m->m_flags & M_EXT) 629 m->m_len = len = min(len, MCLBYTES); 630 else 631 len = m->m_len; 632 } else { 633 /* 634 * Place initial small packet/header at end of mbuf. 635 */ 636 if (len < m->m_len) { 637 if (top == 0 && len + max_linkhdr <= m->m_len) 638 m->m_data += max_linkhdr; 639 m->m_len = len; 640 } else 641 len = m->m_len; 642 } 643 if (copy) 644 copy(cp, mtod(m, caddr_t), (unsigned)len); 645 else 646 bcopy(cp, mtod(m, caddr_t), (unsigned)len); 647 cp += len; 648 *mp = m; 649 mp = &m->m_next; 650 totlen -= len; 651 if (cp == epkt) 652 cp = buf; 653 } 654 return (top); 655}
| 389m_adj(mp, req_len) 390 struct mbuf *mp; 391 int req_len; 392{ 393 register int len = req_len; 394 register struct mbuf *m; 395 register count; 396 397 if ((m = mp) == NULL) 398 return; 399 if (len >= 0) { 400 /* 401 * Trim from head. 402 */ 403 while (m != NULL && len > 0) { 404 if (m->m_len <= len) { 405 len -= m->m_len; 406 m->m_len = 0; 407 m = m->m_next; 408 } else { 409 m->m_len -= len; 410 m->m_data += len; 411 len = 0; 412 } 413 } 414 m = mp; 415 if (mp->m_flags & M_PKTHDR) 416 m->m_pkthdr.len -= (req_len - len); 417 } else { 418 /* 419 * Trim from tail. Scan the mbuf chain, 420 * calculating its length and finding the last mbuf. 421 * If the adjustment only affects this mbuf, then just 422 * adjust and return. Otherwise, rescan and truncate 423 * after the remaining size. 424 */ 425 len = -len; 426 count = 0; 427 for (;;) { 428 count += m->m_len; 429 if (m->m_next == (struct mbuf *)0) 430 break; 431 m = m->m_next; 432 } 433 if (m->m_len >= len) { 434 m->m_len -= len; 435 if (mp->m_flags & M_PKTHDR) 436 mp->m_pkthdr.len -= len; 437 return; 438 } 439 count -= len; 440 if (count < 0) 441 count = 0; 442 /* 443 * Correct length for chain is "count". 444 * Find the mbuf with last data, adjust its length, 445 * and toss data from remaining mbufs on chain. 446 */ 447 m = mp; 448 if (m->m_flags & M_PKTHDR) 449 m->m_pkthdr.len = count; 450 for (; m; m = m->m_next) { 451 if (m->m_len >= count) { 452 m->m_len = count; 453 break; 454 } 455 count -= m->m_len; 456 } 457 while (m = m->m_next) 458 m->m_len = 0; 459 } 460} 461 462/* 463 * Rearange an mbuf chain so that len bytes are contiguous 464 * and in the data area of an mbuf (so that mtod and dtom 465 * will work for a structure of size len). Returns the resulting 466 * mbuf chain on success, frees it and returns null on failure. 467 * If there is room, it will add up to max_protohdr-len extra bytes to the 468 * contiguous region in an attempt to avoid being called next time. 
469 */ 470int MPFail; 471 472struct mbuf * 473m_pullup(n, len) 474 register struct mbuf *n; 475 int len; 476{ 477 register struct mbuf *m; 478 register int count; 479 int space; 480 481 /* 482 * If first mbuf has no cluster, and has room for len bytes 483 * without shifting current data, pullup into it, 484 * otherwise allocate a new mbuf to prepend to the chain. 485 */ 486 if ((n->m_flags & M_EXT) == 0 && 487 n->m_data + len < &n->m_dat[MLEN] && n->m_next) { 488 if (n->m_len >= len) 489 return (n); 490 m = n; 491 n = n->m_next; 492 len -= m->m_len; 493 } else { 494 if (len > MHLEN) 495 goto bad; 496 MGET(m, M_DONTWAIT, n->m_type); 497 if (m == 0) 498 goto bad; 499 m->m_len = 0; 500 if (n->m_flags & M_PKTHDR) { 501 M_COPY_PKTHDR(m, n); 502 n->m_flags &= ~M_PKTHDR; 503 } 504 } 505 space = &m->m_dat[MLEN] - (m->m_data + m->m_len); 506 do { 507 count = min(min(max(len, max_protohdr), space), n->m_len); 508 bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, 509 (unsigned)count); 510 len -= count; 511 m->m_len += count; 512 n->m_len -= count; 513 space -= count; 514 if (n->m_len) 515 n->m_data += count; 516 else 517 n = m_free(n); 518 } while (len > 0 && n); 519 if (len > 0) { 520 (void) m_free(m); 521 goto bad; 522 } 523 m->m_next = n; 524 return (m); 525bad: 526 m_freem(n); 527 MPFail++; 528 return (0); 529} 530 531/* 532 * Partition an mbuf chain in two pieces, returning the tail -- 533 * all but the first len0 bytes. In case of failure, it returns NULL and 534 * attempts to restore the chain to its original state. 
535 */ 536struct mbuf * 537m_split(m0, len0, wait) 538 register struct mbuf *m0; 539 int len0, wait; 540{ 541 register struct mbuf *m, *n; 542 unsigned len = len0, remain; 543 544 for (m = m0; m && len > m->m_len; m = m->m_next) 545 len -= m->m_len; 546 if (m == 0) 547 return (0); 548 remain = m->m_len - len; 549 if (m0->m_flags & M_PKTHDR) { 550 MGETHDR(n, wait, m0->m_type); 551 if (n == 0) 552 return (0); 553 n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; 554 n->m_pkthdr.len = m0->m_pkthdr.len - len0; 555 m0->m_pkthdr.len = len0; 556 if (m->m_flags & M_EXT) 557 goto extpacket; 558 if (remain > MHLEN) { 559 /* m can't be the lead packet */ 560 MH_ALIGN(n, 0); 561 n->m_next = m_split(m, len, wait); 562 if (n->m_next == 0) { 563 (void) m_free(n); 564 return (0); 565 } else 566 return (n); 567 } else 568 MH_ALIGN(n, remain); 569 } else if (remain == 0) { 570 n = m->m_next; 571 m->m_next = 0; 572 return (n); 573 } else { 574 MGET(n, wait, m->m_type); 575 if (n == 0) 576 return (0); 577 M_ALIGN(n, remain); 578 } 579extpacket: 580 if (m->m_flags & M_EXT) { 581 n->m_flags |= M_EXT; 582 n->m_ext = m->m_ext; 583 mclrefcnt[mtocl(m->m_ext.ext_buf)]++; 584 m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */ 585 n->m_data = m->m_data + len; 586 } else { 587 bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); 588 } 589 n->m_len = remain; 590 m->m_len = len; 591 n->m_next = m->m_next; 592 m->m_next = 0; 593 return (n); 594} 595/* 596 * Routine to copy from device local memory into mbufs. 
597 */ 598struct mbuf * 599m_devget(buf, totlen, off0, ifp, copy) 600 char *buf; 601 int totlen, off0; 602 struct ifnet *ifp; 603 void (*copy)(); 604{ 605 register struct mbuf *m; 606 struct mbuf *top = 0, **mp = ⊤ 607 register int off = off0, len; 608 register char *cp; 609 char *epkt; 610 611 cp = buf; 612 epkt = cp + totlen; 613 if (off) { 614 cp += off + 2 * sizeof(u_short); 615 totlen -= 2 * sizeof(u_short); 616 } 617 MGETHDR(m, M_DONTWAIT, MT_DATA); 618 if (m == 0) 619 return (0); 620 m->m_pkthdr.rcvif = ifp; 621 m->m_pkthdr.len = totlen; 622 m->m_len = MHLEN; 623 624 while (totlen > 0) { 625 if (top) { 626 MGET(m, M_DONTWAIT, MT_DATA); 627 if (m == 0) { 628 m_freem(top); 629 return (0); 630 } 631 m->m_len = MLEN; 632 } 633 len = min(totlen, epkt - cp); 634 if (len >= MINCLSIZE) { 635 MCLGET(m, M_DONTWAIT); 636 if (m->m_flags & M_EXT) 637 m->m_len = len = min(len, MCLBYTES); 638 else 639 len = m->m_len; 640 } else { 641 /* 642 * Place initial small packet/header at end of mbuf. 643 */ 644 if (len < m->m_len) { 645 if (top == 0 && len + max_linkhdr <= m->m_len) 646 m->m_data += max_linkhdr; 647 m->m_len = len; 648 } else 649 len = m->m_len; 650 } 651 if (copy) 652 copy(cp, mtod(m, caddr_t), (unsigned)len); 653 else 654 bcopy(cp, mtod(m, caddr_t), (unsigned)len); 655 cp += len; 656 *mp = m; 657 mp = &m->m_next; 658 totlen -= len; 659 if (cp == epkt) 660 cp = buf; 661 } 662 return (top); 663}
|