uipc_mbuf.c (72789) → uipc_mbuf.c (74402)
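r74402 moves the page-rounding of the request size ahead of the allocation-limit checks in m_mballoc() and m_clalloc(), so each check sees the count that the rounded allocation will actually produce, and it replaces m_clalloc()'s page-count variable npg (sized via ctob()) with a byte count npg_sz derived from round_page(ncl * MCLBYTES). Unchanged context below is shown once; removed lines are prefixed with "-", added lines with "+".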
 /*
  * Copyright (c) 1982, 1986, 1988, 1991, 1993
  *	The Regents of the University of California.  All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 17 unchanged lines hidden ---

  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
- * $FreeBSD: head/sys/kern/uipc_mbuf.c 72789 2001-02-21 09:24:13Z bp $
+ * $FreeBSD: head/sys/kern/uipc_mbuf.c 74402 2001-03-17 23:23:24Z bmilekic $
  */

 #include "opt_param.h"
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
 #include <sys/mbuf.h>
 #include <sys/mutex.h>

--- 189 unchanged lines hidden ---

  */
 int
 m_mballoc(int nmb, int how)
 {
         caddr_t p;
         int i;
         int nbytes;

+        nbytes = round_page(nmb * MSIZE);
+        nmb = nbytes / MSIZE;
+
         /*
          * If we've hit the mbuf limit, stop allocating from mb_map.
          * Also, once we run out of map space, it will be impossible to
          * get any more (nothing is ever freed back to the map).
          */
         if (mb_map_full || ((nmb + mbstat.m_mbufs) > nmbufs)) {
                 /*
                  * Needs to be atomic as we may be incrementing it
                  * while holding another mutex, like mclfree. In other
                  * words, m_drops is not reserved solely for mbufs,
                  * but is also available for clusters.
                  */
                 atomic_add_long(&mbstat.m_drops, 1);
                 return (0);
         }

-        nbytes = round_page(nmb * MSIZE);
-
         mtx_unlock(&mmbfree.m_mtx);
         p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT);
         if (p == NULL && how == M_TRYWAIT) {
                 atomic_add_long(&mbstat.m_wait, 1);
                 p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK);
         }
         mtx_lock(&mmbfree.m_mtx);

         /*
          * Either the map is now full, or `how' is M_DONTWAIT and there
          * are no pages left.
          */
         if (p == NULL)
                 return (0);

-        nmb = nbytes / MSIZE;
-
         /*
          * We don't let go of the mutex in order to avoid a race.
          * It is up to the caller to let go of the mutex when done
          * with grabbing the mbuf from the free list.
          */
         for (i = 0; i < nmb; i++) {
                 ((struct mbuf *)p)->m_next = mmbfree.m_head;
                 mmbfree.m_head = (struct mbuf *)p;

--- 73 unchanged lines hidden ---
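In m_mballoc() above, the page rounding now happens before the nmbufs limit check, so the check is applied to the number of mbufs the page-rounded allocation will actually yield rather than to the caller's request. The sketch below is a minimal userland illustration of that arithmetic; PAGE_SIZE and MSIZE are illustrative stand-ins for the kernel's definitions, and round_page() is approximated by the usual power-of-two rounding macro.

/* Userland sketch of the m_mballoc() sizing arithmetic. */
#include <stdio.h>

#define PAGE_SIZE 4096  /* illustrative; the kernel defines this */
#define MSIZE      256  /* illustrative mbuf size */
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        int nmb = 10;                           /* caller asks for 10 mbufs */
        int nbytes = round_page(nmb * MSIZE);   /* 2560 rounds up to 4096 */

        nmb = nbytes / MSIZE;                   /* the page yields 16 mbufs */
        printf("request rounds to %d bytes = %d mbufs\n", nbytes, nmb);
        return 0;
}

With the old ordering, the (nmb + mbstat.m_mbufs) > nmbufs test used the unrounded count (10 here) while the free-list loop later carved 16 mbufs out of the allocation, so the limit could be overshot; rounding first keeps the check and the allocation consistent.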

355 *
356 * Must be called with the mclfree lock held.
357 */
358int
359m_clalloc(int ncl, int how)
360{
361 caddr_t p;
362 int i;
-        int npg;
+        int npg_sz;

+        npg_sz = round_page(ncl * MCLBYTES);
+        ncl = npg_sz / MCLBYTES;
+
         /*
          * If the map is now full (nothing will ever be freed to it).
          * If we've hit the mcluster number limit, stop allocating from
          * mb_map.
          */
         if (mb_map_full || ((ncl + mbstat.m_clusters) > nmbclusters)) {
                 atomic_add_long(&mbstat.m_drops, 1);
                 return (0);
         }

-        npg = ncl;
         mtx_unlock(&mclfree.m_mtx);
-        p = (caddr_t)kmem_malloc(mb_map, ctob(npg),
+        p = (caddr_t)kmem_malloc(mb_map, npg_sz,
             how == M_TRYWAIT ? M_WAITOK : M_NOWAIT);
-        ncl = ncl * PAGE_SIZE / MCLBYTES;
         mtx_lock(&mclfree.m_mtx);

         /*
          * Either the map is now full, or `how' is M_DONTWAIT and there
          * are no pages left.
          */
         if (p == NULL) {
                 atomic_add_long(&mbstat.m_drops, 1);

--- 824 unchanged lines hidden ---
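The m_clalloc() change is similar but also corrects the request size itself: the old code passed ctob(npg) with npg = ncl, treating the cluster count as a page count, and only recomputed ncl from the pages obtained after the nmbclusters check had already been made. The new code derives the byte count npg_sz from round_page(ncl * MCLBYTES) and fixes up ncl before the check. Below is a userland sketch contrasting the two paths; the MCLBYTES and PAGE_SIZE values are illustrative (they vary by platform), and ctob() is the pages-to-bytes conversion.

/* Userland sketch contrasting the old and new m_clalloc() sizing. */
#include <stdio.h>

#define PAGE_SIZE 4096  /* illustrative */
#define MCLBYTES  2048  /* illustrative cluster size */
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ctob(x)   ((x) * PAGE_SIZE)     /* pages ("clicks") to bytes */

int main(void)
{
        int ncl = 3;    /* caller asks for 3 clusters */

        /* Old path: ncl was treated as a page count. */
        int old_bytes = ctob(ncl);                      /* 12288 bytes */
        int old_ncl   = ncl * PAGE_SIZE / MCLBYTES;     /* 6 clusters  */

        /* New path: size the request in bytes, then derive the
         * cluster count before the nmbclusters limit check. */
        int npg_sz  = round_page(ncl * MCLBYTES);       /* 8192 bytes */
        int new_ncl = npg_sz / MCLBYTES;                /* 4 clusters */

        printf("old: %d bytes -> %d clusters (limit checked with %d)\n",
            old_bytes, old_ncl, ncl);
        printf("new: %d bytes -> %d clusters (limit checked with %d)\n",
            npg_sz, new_ncl, new_ncl);
        return 0;
}

When MCLBYTES is half of PAGE_SIZE, as here, the old path allocated twice the bytes the limit check accounted for; the new path checks and allocates the same four clusters.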