/*-
 * Copyright (c) 2004, 2005,
 *	Bosko Milekic <bmilekic@FreeBSD.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_mbuf.c 184778 2008-11-09 01:53:06Z kmacy $");

#include "opt_mac.h"
#include "opt_param.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
/*
 * In FreeBSD, Mbufs and Mbuf Clusters are allocated from UMA
 * Zones.
 *
 * Mbuf Clusters (2K, contiguous) are allocated from the Cluster
 * Zone.  The Zone can be capped at kern.ipc.nmbclusters, if the
 * administrator so desires.
 *
 * Mbufs are allocated from a UMA Master Zone called the Mbuf
 * Zone.
 *
 * Additionally, FreeBSD provides a Packet Zone, which it
 * configures as a Secondary Zone to the Mbuf Master Zone,
 * thus sharing backend Slab kegs with the Mbuf Master Zone.
 *
 * Thus common-case allocations and locking are simplified:
 *
 *  m_clget()                m_getcl()
 *      |                      |
 *      |   .------------>[(Packet Cache)]    m_get(), m_gethdr()
 *      |   |             [     Packet   ]            |
 *  [(Cluster Cache)]     [    Secondary ]   [ (Mbuf Cache)     ]
 *  [ Cluster Zone  ]     [     Zone     ]   [ Mbuf Master Zone ]
 *        |                       \________         |
 *  [ Cluster Keg   ]                      \       /
 *        |                             [ Mbuf Keg   ]
 *  [ Cluster Slabs ]                        |
 *        |                             [ Mbuf Slabs ]
 *         \____________(VM)_________________/
 *
 *
 * Whenever an object is allocated from one of the Zones with
 * uma_zalloc(), its _ctor_ function is executed.  Likewise, on
 * every deallocation through uma_zfree(), its _dtor_ function
 * is executed.
 *
 * Caches are per-CPU and are filled from the Master Zone.
 *
 * Whenever an object is allocated from the underlying global
 * memory pool it gets pre-initialized with the _zinit_ functions.
 * When the Kegs are overfull, objects get decommissioned with the
 * _zfini_ functions and freed back to the global memory pool.
 *
 */
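
/*
 * Illustrative sketch (comment only, not compiled): the common
 * allocation paths named in the diagram above, using the standard
 * mbuf(9) API.  m_getcl() draws a pre-assembled mbuf + cluster pair
 * from the Packet Zone in one shot, while m_gethdr() + m_clget()
 * touch the Mbuf and Cluster Zones separately:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	...
 *	m_freem(m);
 *
 * m_freem() on such an mbuf sends the intact mbuf + cluster pair
 * back through the Packet Zone's per-CPU cache rather than freeing
 * the two pieces individually.
 */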

int nmbclusters;		/* limits number of mbuf clusters */
int nmbjumbop;			/* limits number of page size jumbo clusters */
int nmbjumbo9;			/* limits number of 9k jumbo clusters */
int nmbjumbo16;			/* limits number of 16k jumbo clusters */
struct mbstat mbstat;

static void
tunable_mbinit(void *dummy)
{
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);

	/* This has to be done before VM init. */
	if (nmbclusters == 0)
		nmbclusters = 1024 + maxusers * 64;
	nmbjumbop = nmbclusters / 2;
	nmbjumbo9 = nmbjumbop / 2;
	nmbjumbo16 = nmbjumbo9 / 2;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
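
/*
 * kern.ipc.nmbclusters may be overridden from the loader before VM
 * starts, e.g. in /boot/loader.conf (illustrative value):
 *
 *	kern.ipc.nmbclusters="65536"
 *
 * The jumbo cluster limits are then derived from it by the
 * successive halving above.
 */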

/* XXX: These should be tuneables.  Can't change UMA limits on the fly. */
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters) {
			nmbclusters = newnmbclusters;
			uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbclusters, CTLTYPE_INT|CTLFLAG_RW,
    &nmbclusters, 0, sysctl_nmbclusters, "IU",
    "Maximum number of mbuf clusters allowed");

static int
sysctl_nmbjumbop(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbop;

	newnmbjumbop = nmbjumbop;
	error = sysctl_handle_int(oidp, &newnmbjumbop, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbop > nmbjumbop) {
			nmbjumbop = newnmbjumbop;
			uma_zone_set_max(zone_jumbop, nmbjumbop);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbop, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbop, 0, sysctl_nmbjumbop, "IU",
    "Maximum number of mbuf page size jumbo clusters allowed");

static int
sysctl_nmbjumbo9(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo9;

	newnmbjumbo9 = nmbjumbo9;
	error = sysctl_handle_int(oidp, &newnmbjumbo9, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo9 > nmbjumbo9) {
			nmbjumbo9 = newnmbjumbo9;
			uma_zone_set_max(zone_jumbo9, nmbjumbo9);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo9, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo9, 0, sysctl_nmbjumbo9, "IU",
    "Maximum number of mbuf 9k jumbo clusters allowed");

static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr) {
		if (newnmbjumbo16 > nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjumbo16, CTLTYPE_INT|CTLFLAG_RW,
    &nmbjumbo16, 0, sysctl_nmbjumbo16, "IU",
    "Maximum number of mbuf 16k jumbo clusters allowed");

SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
    "Mbuf general information and statistics");

/*
 * Zones from which we allocate.
 */
uma_zone_t	zone_mbuf;
uma_zone_t	zone_clust;
uma_zone_t	zone_pack;
uma_zone_t	zone_jumbop;
uma_zone_t	zone_jumbo9;
uma_zone_t	zone_jumbo16;
uma_zone_t	zone_ext_refcnt;

/*
 * Local prototypes.
 */
static int	mb_ctor_mbuf(void *, int, void *, int);
static int	mb_ctor_clust(void *, int, void *, int);
static int	mb_ctor_pack(void *, int, void *, int);
static void	mb_dtor_mbuf(void *, int, void *);
static void	mb_dtor_clust(void *, int, void *);
static void	mb_dtor_pack(void *, int, void *);
static int	mb_zinit_pack(void *, int, int);
static void	mb_zfini_pack(void *, int);

static void	mb_reclaim(void *);
static void	mbuf_init(void *);
static void    *mbuf_jumbo_alloc(uma_zone_t, int, u_int8_t *, int);
static void	mbuf_jumbo_free(void *, int, u_int8_t);

static MALLOC_DEFINE(M_JUMBOFRAME, "jumboframes", "mbuf jumbo frame buffers");

/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
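
/*
 * Worked example of the power-of-2 check above: for MSIZE = 256,
 * ((255 ^ 256) + 1) >> 1 = (511 + 1) >> 1 = 256 == MSIZE, so the
 * assertion holds.  For a non-power-of-2 value such as 320,
 * ((319 ^ 320) + 1) >> 1 = (127 + 1) >> 1 = 64 != 320, and the
 * build fails.  dtom() relies on this property, recovering the
 * enclosing mbuf by masking a data pointer with ~(MSIZE - 1).
 */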

/*
 * Initialize FreeBSD Network buffer allocation.
 */
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		uma_zone_set_max(zone_clust, nmbclusters);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zones too.  Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		uma_zone_set_max(zone_jumbop, nmbjumbop);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo9 > 0)
		uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	uma_zone_set_freef(zone_jumbo9, mbuf_jumbo_free);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbo16 > 0)
		uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	uma_zone_set_freef(zone_jumbo16, mbuf_jumbo_free);

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}

/*
 * UMA backend page allocator for the jumbo frame zones.
 *
 * Allocates kernel virtual memory that is backed by contiguous physical
 * pages.
 */
static void *
mbuf_jumbo_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{

	/* Inform UMA that this allocator uses kernel_map/object. */
	*flags = UMA_SLAB_KERNEL;
	/* Any physical address will do; 1-byte alignment, no boundary. */
	return (contigmalloc(bytes, M_JUMBOFRAME, wait, (vm_paddr_t)0,
	    ~(vm_paddr_t)0, 1, 0));
}

/*
 * UMA backend page deallocator for the jumbo frame zones.
 */
static void
mbuf_jumbo_free(void *mem, int size, u_int8_t flags)
{

	contigfree(mem, size, M_JUMBOFRAME);
}

/*
 * Constructor for Mbuf master zone.
 *
 * The 'arg' pointer points to a mb_args structure which
 * contains call-specific information required to support the
 * mbuf allocation API.  See mbuf.h.
 */
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

	/*
	 * The mbuf is initialized later.  The caller has the
	 * responsibility to set up any MAC labels too.
	 */
	if (type == MT_NOINIT)
		return (0);

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	m->m_type = type;
	if (flags & M_PKTHDR) {
		m->m_data = m->m_pktdat;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	} else
		m->m_data = m->m_dat;
	return (0);
}
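
/*
 * Illustrative sketch (comment only, not compiled): this constructor
 * runs via uma_zalloc_arg() with a struct mb_args describing the
 * request, which is roughly what the m_get()/m_gethdr() inlines in
 * mbuf.h expand to:
 *
 *	struct mb_args args;
 *	struct mbuf *m;
 *
 *	args.flags = M_PKTHDR;
 *	args.type = MT_DATA;
 *	m = uma_zalloc_arg(zone_mbuf, &args, M_DONTWAIT);
 */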

/*
 * The Mbuf master zone destructor.
 */
static void
mb_dtor_mbuf(void *mem, int size, void *arg)
{
	struct mbuf *m;
	unsigned long flags;

	m = (struct mbuf *)mem;
	flags = (unsigned long)arg;

	if ((flags & MB_NOTAGS) == 0 && (m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);
	KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
	KASSERT((m->m_flags & M_NOFREE) == 0, ("%s: M_NOFREE set", __func__));
#ifdef INVARIANTS
	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Mbuf Packet zone destructor.
 */
static void
mb_dtor_pack(void *mem, int size, void *arg)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
	if ((m->m_flags & M_PKTHDR) != 0)
		m_tag_delete_chain(m, NULL);

	/* Make sure we've got a clean cluster back. */
	KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
	KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
	KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
	KASSERT(m->m_ext.ext_arg1 == NULL, ("%s: ext_arg1 != NULL", __func__));
	KASSERT(m->m_ext.ext_arg2 == NULL, ("%s: ext_arg2 != NULL", __func__));
	KASSERT(m->m_ext.ext_size == MCLBYTES,
	    ("%s: ext_size != MCLBYTES", __func__));
	KASSERT(m->m_ext.ext_type == EXT_PACKET,
	    ("%s: ext_type != EXT_PACKET", __func__));
	KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
#ifdef INVARIANTS
	trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
#endif
	/*
	 * If there are processes blocked on zone_clust, waiting for pages
	 * to be freed up, cause them to be woken up by draining the
	 * packet zone.  We are exposed to a race here (in the check for
	 * the UMA_ZFLAG_FULL) where we might miss the flag set, but that
	 * is deliberate.  We don't want to acquire the zone lock for every
	 * mbuf free.
	 */
	if (uma_zone_exhausted_nolock(zone_clust))
		zone_drain(zone_pack);
}

/*
 * The Cluster and Jumbo[PAGESIZE|9|16] zone constructor.
 *
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.  If 'arg' is
 * NULL we allocate just the cluster without attaching
 * it to any mbuf.  See mbuf.h.
 */
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	u_int *refcnt;
	int type;
	uma_zone_t zone;

#ifdef INVARIANTS
	trash_ctor(mem, size, arg, how);
#endif
	switch (size) {
	case MCLBYTES:
		type = EXT_CLUSTER;
		zone = zone_clust;
		break;
#if MJUMPAGESIZE != MCLBYTES
	case MJUMPAGESIZE:
		type = EXT_JUMBOP;
		zone = zone_jumbop;
		break;
#endif
	case MJUM9BYTES:
		type = EXT_JUMBO9;
		zone = zone_jumbo9;
		break;
	case MJUM16BYTES:
		type = EXT_JUMBO16;
		zone = zone_jumbo16;
		break;
	default:
		panic("unknown cluster size");
		break;
	}

	m = (struct mbuf *)arg;
	refcnt = uma_find_refcnt(zone, mem);
	*refcnt = 1;
	if (m != NULL) {
		m->m_ext.ext_buf = (caddr_t)mem;
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg1 = NULL;
		m->m_ext.ext_arg2 = NULL;
		m->m_ext.ext_size = size;
		m->m_ext.ext_type = type;
		m->m_ext.ref_cnt = refcnt;
	}

	return (0);
}
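
/*
 * Illustrative sketch (comment only, not compiled): m_clget() reaches
 * this constructor by passing the mbuf itself as the allocation
 * argument, so the cluster is wired up here rather than by the
 * caller; roughly:
 *
 *	m->m_ext.ext_buf = NULL;
 *	uma_zalloc_arg(zone_clust, m, how);
 *
 * On success the constructor has set M_EXT, pointed m_data at the
 * cluster, and initialized the reference count to 1.
 */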

/*
 * The Mbuf Cluster zone destructor.
 */
static void
mb_dtor_clust(void *mem, int size, void *arg)
{
#ifdef INVARIANTS
	uma_zone_t zone;

	zone = m_getzone(size);
	KASSERT(*(uma_find_refcnt(zone, mem)) <= 1,
	    ("%s: refcnt incorrect %u", __func__,
	    *(uma_find_refcnt(zone, mem))));

	trash_dtor(mem, size, arg);
#endif
}

/*
 * The Packet secondary zone's init routine, executed on the
 * object's transition from mbuf keg slab to zone cache.
 */
static int
mb_zinit_pack(void *mem, int size, int how)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;		/* m is virgin. */
	if (uma_zalloc_arg(zone_clust, m, how) == NULL ||
	    m->m_ext.ext_buf == NULL)
		return (ENOMEM);
	m->m_ext.ext_type = EXT_PACKET;	/* Override. */
#ifdef INVARIANTS
	trash_init(m->m_ext.ext_buf, MCLBYTES, how);
#endif
	return (0);
}

/*
 * The Packet secondary zone's fini routine, executed on the
 * object's transition from zone cache to keg slab.
 */
static void
mb_zfini_pack(void *mem, int size)
{
	struct mbuf *m;

	m = (struct mbuf *)mem;
#ifdef INVARIANTS
	trash_fini(m->m_ext.ext_buf, MCLBYTES);
#endif
	uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
#ifdef INVARIANTS
	trash_dtor(mem, size, NULL);
#endif
}

/*
 * The "packet" keg constructor.
 */
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
	struct mbuf *m;
	struct mb_args *args;
#ifdef MAC
	int error;
#endif
	int flags;
	short type;

	m = (struct mbuf *)mem;
	args = (struct mb_args *)arg;
	flags = args->flags;
	type = args->type;

#ifdef INVARIANTS
	trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
#endif
	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_ext.ext_buf;
	m->m_len = 0;
	m->m_flags = (flags | M_EXT);
	m->m_type = type;

	if (flags & M_PKTHDR) {
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.header = NULL;
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.csum_data = 0;
		m->m_pkthdr.tso_segsz = 0;
		m->m_pkthdr.ether_vtag = 0;
		SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
		/* If the label init fails, fail the alloc */
		error = mac_mbuf_init(m, how);
		if (error)
			return (error);
#endif
	}
	/* m_ext is already initialized. */

	return (0);
}

/*
 * This is the protocol drain routine.
 *
 * No locks should be held when this is called.  The drain routines have to
 * presently acquire some locks which raises the possibility of lock order
 * reversal.
 */
static void
mb_reclaim(void *junk)
{
	struct domain *dp;
	struct protosw *pr;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK | WARN_PANIC, NULL,
	    "mb_reclaim()");

	/* dom_protoswNPROTOSW points one past the last protosw entry. */
	for (dp = domains; dp != NULL; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain != NULL)
				(*pr->pr_drain)();
}