kern_mbuf.c (150644) kern_mbuf.c (151976)
1/*-
2 * Copyright (c) 2004, 2005,
3 * Bosko Milekic <bmilekic@FreeBSD.org>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 12 unchanged lines hidden (view full) ---

21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__FBSDID("$FreeBSD: head/sys/kern/kern_mbuf.c 150644 2005-09-27 20:28:43Z rwatson $");
29__FBSDID("$FreeBSD: head/sys/kern/kern_mbuf.c 151976 2005-11-02 16:20:36Z andre $");
30
31#include "opt_mac.h"
32#include "opt_param.h"
33
34#include <sys/param.h>
35#include <sys/mac.h>
36#include <sys/malloc.h>
37#include <sys/systm.h>

--- 35 unchanged lines hidden (view full) ---

73 * [(Cluster Cache)] [ Secondary ] [ (Mbuf Cache) ]
74 * [ Cluster Zone ] [ Zone ] [ Mbuf Master Zone ]
75 * | \________ |
76 * [ Cluster Keg ] \ /
77 * | [ Mbuf Keg ]
78 * [ Cluster Slabs ] |
79 * | [ Mbuf Slabs ]
80 * \____________(VM)_________________/
81 *
82 *
 83 * Whenever an object is allocated with uma_zalloc() out of
 84 * one of the Zones, its _ctor_ function is executed.  Likewise,
 85 * for any deallocation through uma_zfree() the _dtor_ function
 86 * is executed.
87 *
88 * Caches are per-CPU and are filled from the Master Zone.
89 *
 90 * Whenever an object is allocated from the underlying global
 91 * memory pool it gets pre-initialized by the _zinit_ functions.
 92 * When the Kegs are overfull, objects get decommissioned with the
 93 * _zfini_ functions and freed back to the global memory pool.
94 *
95 */
96
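To make the lifecycle described above concrete, here is a minimal sketch of a zone with all four hooks. It is illustrative only and not part of this change; the foo_* names and the zone are hypothetical, but the hook signatures match the uma_zcreate() calls below.

	static int
	foo_ctor(void *mem, int size, void *arg, int how)
	{
		/* Runs on every uma_zalloc() from the zone. */
		return (0);
	}

	static void
	foo_dtor(void *mem, int size, void *arg)
	{
		/* Runs on every uma_zfree() back into the zone. */
	}

	static int
	foo_zinit(void *mem, int size, int how)
	{
		/* Runs only when the object comes from the global pool. */
		return (0);
	}

	static void
	foo_zfini(void *mem, int size)
	{
		/* Runs only when the object goes back to the global pool. */
	}

	/* At initialization time: */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    foo_ctor, foo_dtor, foo_zinit, foo_zfini,
	    UMA_ALIGN_PTR, 0);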
83int nmbclusters;
97int nmbclusters; /* limits number of mbuf clusters */
98int nmbjumbo9; /* limits number of 9k jumbo clusters */
99int nmbjumbo16; /* limits number of 16k jumbo clusters */
84struct mbstat mbstat;
85
86static void
87tunable_mbinit(void *dummy)
88{
89
90 /* This has to be done before VM init. */
91 nmbclusters = 1024 + maxusers * 64;
92 TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
93}
94SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
95
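A worked example of the sizing above: with maxusers = 256 this yields nmbclusters = 1024 + 256 * 64 = 17408. The TUNABLE_INT_FETCH() call lets a boot-time tunable override the computed default, for instance with a line such as the following (value purely illustrative) in /boot/loader.conf:

	kern.ipc.nmbclusters="32768"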
96SYSCTL_DECL(_kern_ipc);
113/* XXX: These should be tuneables. Can't change UMA limits on the fly. */
97SYSCTL_INT(_kern_ipc, OID_AUTO, nmbclusters, CTLFLAG_RW, &nmbclusters, 0,
98 "Maximum number of mbuf clusters allowed");
116SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo9, CTLFLAG_RW, &nmbjumbo9, 0,
117 "Maximum number of mbuf 9k jumbo clusters allowed");
118SYSCTL_INT(_kern_ipc, OID_AUTO, nmbjumbo16, CTLFLAG_RW, &nmbjumbo16, 0,
119 "Maximum number of mbuf 16k jumbo clusters allowed");
99SYSCTL_STRUCT(_kern_ipc, OID_AUTO, mbstat, CTLFLAG_RD, &mbstat, mbstat,
100 "Mbuf general information and statistics");
101
102/*
103 * Zones from which we allocate.
104 */
105uma_zone_t zone_mbuf;
106uma_zone_t zone_clust;
107uma_zone_t zone_pack;
129uma_zone_t zone_jumbo9;
130uma_zone_t zone_jumbo16;
131uma_zone_t zone_ext_refcnt;
108
109/*
110 * Local prototypes.
111 */
112static int mb_ctor_mbuf(void *, int, void *, int);
113static int mb_ctor_clust(void *, int, void *, int);
114static int mb_ctor_pack(void *, int, void *, int);
115static void mb_dtor_mbuf(void *, int, void *);
116static void mb_dtor_clust(void *, int, void *); /* XXX */
117static void mb_dtor_pack(void *, int, void *); /* XXX */
118static int mb_init_pack(void *, int, int);
119static void mb_fini_pack(void *, int);
140static void mb_dtor_clust(void *, int, void *);
141static void mb_dtor_pack(void *, int, void *);
142static int mb_zinit_pack(void *, int, int);
143static void mb_zfini_pack(void *, int);
120
121static void mb_reclaim(void *);
122static void mbuf_init(void *);
123
124/* Ensure that MSIZE doesn't break dtom() - it must be a power of 2 */
125CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
126
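A quick check of the bit trick in the assertion above: for a power of two such as MSIZE = 256, ((256 - 1) ^ 256) + 1 = 512 and 512 >> 1 = 256 == MSIZE, so the assertion holds. For a non-power-of-two such as 320, ((320 - 1) ^ 320) + 1 = 128 and 128 >> 1 = 64 != 320, so the build fails.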
127/*
128 * Initialize FreeBSD Network buffer allocation.
129 */
130SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL)
131static void
132mbuf_init(void *dummy)
133{
134
135 /*
136 * Configure UMA zones for Mbufs, Clusters, and Packets.
137 */
138 zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE, mb_ctor_mbuf,
139     mb_dtor_mbuf,
140#ifdef INVARIANTS
141     trash_init, trash_fini, MSIZE - 1, UMA_ZONE_MAXBUCKET);
142#else
143     NULL, NULL, MSIZE - 1, UMA_ZONE_MAXBUCKET);
144#endif
162 zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
163     mb_ctor_mbuf, mb_dtor_mbuf,
164#ifdef INVARIANTS
165     trash_init, trash_fini,
166#else
167     NULL, NULL,
168#endif
169     MSIZE - 1, UMA_ZONE_MAXBUCKET);
170
145 zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
146     mb_ctor_clust,
147#ifdef INVARIANTS
148     mb_dtor_clust, trash_init, trash_fini, UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
149#else
150     mb_dtor_clust, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
151#endif
171 zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
172     mb_ctor_clust, mb_dtor_clust,
173#ifdef INVARIANTS
174     trash_init, trash_fini,
175#else
176     NULL, NULL,
177#endif
178     UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
152 if (nmbclusters > 0)
153 uma_zone_set_max(zone_clust, nmbclusters);
181
154 zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
155     mb_dtor_pack, mb_init_pack, mb_fini_pack, zone_mbuf);
182 zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
183     mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);
156
184
157 /* uma_prealloc() goes here */
185 /* Make the jumbo frame zones too: 9k and 16k. */
186 zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
187 mb_ctor_clust, mb_dtor_clust,
188#ifdef INVARIANTS
189 trash_init, trash_fini,
190#else
191 NULL, NULL,
192#endif
193 UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
194 if (nmbjumbo9 > 0)
195 uma_zone_set_max(zone_jumbo9, nmbjumbo9);
158
196
197 zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
198 mb_ctor_clust, mb_dtor_clust,
199#ifdef INVARIANTS
200 trash_init, trash_fini,
201#else
202 NULL, NULL,
203#endif
204 UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
205 if (nmbjumbo16 > 0)
206 uma_zone_set_max(zone_jumbo16, nmbjumbo16);
207
208 zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
209 NULL, NULL,
210 NULL, NULL,
211 UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
212
213 /* uma_prealloc() goes here... */
214
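The following is a hedged sketch of how a consumer could obtain one of the new jumbo clusters directly from these zones; it is not code from this change, and the function name and error handling are illustrative. The mbuf is passed as the allocation argument so that mb_ctor_clust() below wires the buffer into m->m_ext:

	static int
	example_alloc_jumbo9(void)	/* hypothetical consumer */
	{
		struct mb_args args;
		struct mbuf *m;

		args.flags = M_PKTHDR;
		args.type = MT_DATA;
		m = uma_zalloc_arg(zone_mbuf, &args, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		if (uma_zalloc_arg(zone_jumbo9, m, M_DONTWAIT) == NULL) {
			/* No cluster was attached; free the bare mbuf. */
			uma_zfree(zone_mbuf, m);
			return (ENOBUFS);
		}
		/* m->m_ext now describes a 9k buffer of type EXT_JUMBO9. */
		m_freem(m);
		return (0);
	}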
159 /*
160 * Hook event handler for low-memory situation, used to
161 * drain protocols and push data back to the caches (UMA
162 * later pushes it back to VM).
163 */
164 EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
165 EVENTHANDLER_PRI_FIRST);
166

--- 17 unchanged lines hidden (view full) ---

184 mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
185}
186
187/*
188 * Constructor for Mbuf master zone.
189 *
190 * The 'arg' pointer points to a mb_args structure which
191 * contains call-specific information required to support the
192 * mbuf allocation API.
248 * mbuf allocation API. See mbuf.h.
193 */
194static int
195mb_ctor_mbuf(void *mem, int size, void *arg, int how)
196{
197 struct mbuf *m;
198 struct mb_args *args;
199#ifdef MAC
200 int error;

--- 4 unchanged lines hidden (view full) ---

205#ifdef INVARIANTS
206 trash_ctor(mem, size, arg, how);
207#endif
208 m = (struct mbuf *)mem;
209 args = (struct mb_args *)arg;
210 flags = args->flags;
211 type = args->type;
212
213 m->m_type = type;
269 /*
270 * The mbuf is initialized later.  The caller has the
271 * responsibility to set up any MAC labels too.
272 */
273 if (type == MT_NOINIT)
274 return (0);
275
214 m->m_next = NULL;
215 m->m_nextpkt = NULL;
278 m->m_len = 0;
216 m->m_flags = flags;
280 m->m_type = type;
217 if (flags & M_PKTHDR) {
218 m->m_data = m->m_pktdat;
219 m->m_pkthdr.rcvif = NULL;
284 m->m_pkthdr.len = 0;
285 m->m_pkthdr.header = NULL;
220 m->m_pkthdr.csum_flags = 0;
287 m->m_pkthdr.csum_data = 0;
221 SLIST_INIT(&m->m_pkthdr.tags);
222#ifdef MAC
223 /* If the label init fails, fail the alloc */
224 error = mac_init_mbuf(m, how);
225 if (error)
226 return (error);
227#endif
228 } else
229 m->m_data = m->m_dat;
230 return (0);
231}
232
233/*
234 * The Mbuf master zone and Packet secondary zone destructor.
301 * The Mbuf master zone destructor.
235 */
236static void
237mb_dtor_mbuf(void *mem, int size, void *arg)
238{
239 struct mbuf *m;
240
241 m = (struct mbuf *)mem;
242 if ((m->m_flags & M_PKTHDR) != 0)
243 m_tag_delete_chain(m, NULL);
311 KASSERT((m->m_flags & M_EXT) == 0, ("%s: M_EXT set", __func__));
244#ifdef INVARIANTS
245 trash_dtor(mem, size, arg);
246#endif
247}
248
249/* XXX Only because of stats */
317/*
318 * The Mbuf Packet zone destructor.
319 */
250static void
251mb_dtor_pack(void *mem, int size, void *arg)
252{
253 struct mbuf *m;
254
255 m = (struct mbuf *)mem;
256 if ((m->m_flags & M_PKTHDR) != 0)
257 m_tag_delete_chain(m, NULL);
328
329 /* Make sure we've got a clean cluster back. */
330 KASSERT((m->m_flags & M_EXT) == M_EXT, ("%s: M_EXT not set", __func__));
331 KASSERT(m->m_ext.ext_buf != NULL, ("%s: ext_buf == NULL", __func__));
332 KASSERT(m->m_ext.ext_free == NULL, ("%s: ext_free != NULL", __func__));
333 KASSERT(m->m_ext.ext_args == NULL, ("%s: ext_args != NULL", __func__));
334 KASSERT(m->m_ext.ext_size == MCLBYTES, ("%s: ext_size != MCLBYTES", __func__));
335 KASSERT(m->m_ext.ext_type == EXT_CLUSTER, ("%s: ext_type != EXT_CLUSTER", __func__));
336 KASSERT(*m->m_ext.ref_cnt == 1, ("%s: ref_cnt != 1", __func__));
258#ifdef INVARIANTS
259 trash_dtor(m->m_ext.ext_buf, MCLBYTES, arg);
260#endif
261}
262
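The reasoning behind these assertions: an object cached in the Packet zone keeps its cluster attached between uses, so an mbuf returning here must still hold exactly one reference to a plain 2k cluster with no custom free routine. Anything else means a foreign or shared buffer was freed into the wrong zone.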
263/*
264 * The Cluster zone constructor.
343 * The Cluster and Jumbo[9|16] zone constructor.
265 *
266 * Here the 'arg' pointer points to the Mbuf which we
267 * are configuring cluster storage for.
346 * are configuring cluster storage for.  If 'arg' is
347 * NULL we allocate just the cluster without attaching
348 * it to any mbuf.  See mbuf.h.
268 */
269static int
270mb_ctor_clust(void *mem, int size, void *arg, int how)
271{
272 struct mbuf *m;
354 u_int *refcnt;
355 int type = 0;
273
274#ifdef INVARIANTS
275 trash_ctor(mem, size, arg, how);
276#endif
277 m = (struct mbuf *)arg;
278 m->m_ext.ext_buf = (caddr_t)mem;
279 m->m_data = m->m_ext.ext_buf;
280 m->m_flags |= M_EXT;
281 m->m_ext.ext_free = NULL;
282 m->m_ext.ext_args = NULL;
283 m->m_ext.ext_size = MCLBYTES;
284 m->m_ext.ext_type = EXT_CLUSTER;
285 m->m_ext.ref_cnt = NULL; /* Lazy counter assign. */
361 if (m != NULL) {
362 switch (size) {
363 case MCLBYTES:
364 type = EXT_CLUSTER;
365 break;
366 case MJUM9BYTES:
367 type = EXT_JUMBO9;
368 break;
369 case MJUM16BYTES:
370 type = EXT_JUMBO16;
371 break;
372 default:
373 panic("unknown cluster size");
374 break;
375 }
376 m->m_ext.ext_buf = (caddr_t)mem;
377 m->m_data = m->m_ext.ext_buf;
378 m->m_flags |= M_EXT;
379 m->m_ext.ext_free = NULL;
380 m->m_ext.ext_args = NULL;
381 m->m_ext.ext_size = size;
382 m->m_ext.ext_type = type;
383 m->m_ext.ref_cnt = uma_find_refcnt(zone_clust, mem);
384 *m->m_ext.ref_cnt = 1;
385 } else {
386 refcnt = uma_find_refcnt(zone_clust, mem);
387 *refcnt = 1;
388 }
286 return (0);
287}
288
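Assuming the standard size definitions (MCLBYTES = 2048 with the default MCLSHIFT, MJUM9BYTES = 9 * 1024 and MJUM16BYTES = 16 * 1024), the switch above maps each backing zone to EXT_CLUSTER, EXT_JUMBO9 or EXT_JUMBO16 respectively; any other size indicates a misconfigured zone and panics.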
289/* XXX */
392/*
393 * The Mbuf Cluster zone destructor.
394 */
290static void
291mb_dtor_clust(void *mem, int size, void *arg)
292{
398 u_int *refcnt;
399
400 refcnt = uma_find_refcnt(zone_clust, mem);
401 KASSERT(*refcnt == 1, ("%s: refcnt incorrect %u", __func__, *refcnt));
402 *refcnt = 0;
293#ifdef INVARIANTS
294 trash_dtor(mem, size, arg);
295#endif
296}
297
298/*
299 * The Packet secondary zone's init routine, executed on the
300 * object's transition from keg slab to zone cache.
410 * object's transition from mbuf keg slab to zone cache.
301 */
302static int
303mb_init_pack(void *mem, int size, int how)
413mb_zinit_pack(void *mem, int size, int how)
304{
305 struct mbuf *m;
306
307 m = (struct mbuf *)mem;
308 m->m_ext.ext_buf = NULL;
417 m = (struct mbuf *)mem; /* m is virgin. */
309 uma_zalloc_arg(zone_clust, m, how);
310 if (m->m_ext.ext_buf == NULL)
311 return (ENOMEM);
312#ifdef INVARIANTS
313 trash_init(m->m_ext.ext_buf, MCLBYTES, how);
314#endif
315 return (0);
316}
317
318/*
319 * The Packet secondary zone's fini routine, executed on the
320 * object's transition from zone cache to keg slab.
321 */
322static void
323mb_fini_pack(void *mem, int size)
432mb_zfini_pack(void *mem, int size)
324{
325 struct mbuf *m;
326
327 m = (struct mbuf *)mem;
328#ifdef INVARIANTS
329 trash_fini(m->m_ext.ext_buf, MCLBYTES);
330#endif
331 uma_zfree_arg(zone_clust, m->m_ext.ext_buf, NULL);
332 m->m_ext.ext_buf = NULL;
333#ifdef INVARIANTS
334 trash_dtor(mem, size, NULL);
335#endif
336}
337
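Consumers normally reach zone_pack through the wrappers in mbuf.h rather than calling UMA directly. Assuming the m_getcl() wrapper (usage illustrative, not part of this diff), the fast path enabled by the secondary zone looks like:

	static int
	example_packet_alloc(void)	/* hypothetical consumer */
	{
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (ENOBUFS);
		/*
		 * The mbuf arrives with its 2k cluster already attached;
		 * mb_zinit_pack() ran when the object first entered the
		 * zone cache, not on this allocation.
		 */
		m_freem(m);
		return (0);
	}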
338/*
339 * The "packet" keg constructor.
340 */

--- 11 unchanged lines hidden (view full) ---

352 m = (struct mbuf *)mem;
353 args = (struct mb_args *)arg;
354 flags = args->flags;
355 type = args->type;
356
357#ifdef INVARIANTS
358 trash_ctor(m->m_ext.ext_buf, MCLBYTES, arg, how);
359#endif
360 m->m_type = type;
361 m->m_next = NULL;
362 m->m_nextpkt = NULL;
363 m->m_data = m->m_ext.ext_buf;
364 m->m_flags = flags|M_EXT;
365 m->m_ext.ext_free = NULL;
366 m->m_ext.ext_args = NULL;
367 m->m_ext.ext_size = MCLBYTES;
368 m->m_ext.ext_type = EXT_PACKET;
369 m->m_ext.ref_cnt = NULL; /* Lazy counter assign. */
471 m->m_len = 0;
472 m->m_flags = (flags | M_EXT);
473 m->m_type = type;
370
371 if (flags & M_PKTHDR) {
372 m->m_pkthdr.rcvif = NULL;
477 m->m_pkthdr.len = 0;
478 m->m_pkthdr.header = NULL;
373 m->m_pkthdr.csum_flags = 0;
480 m->m_pkthdr.csum_data = 0;
374 SLIST_INIT(&m->m_pkthdr.tags);
375#ifdef MAC
376 /* If the label init fails, fail the alloc */
377 error = mac_init_mbuf(m, how);
378 if (error)
379 return (error);
380#endif
381 }
489 /* m_ext is already initialized. */
490
382 return (0);
383}
384
385/*
386 * This is the protocol drain routine.
387 *
388 * No locks should be held when this is called. The drain routines have to
389 * presently acquire some locks which raises the possibility of lock order

--- 16 unchanged lines hidden ---