--- kern_malloc.c (127911)
+++ kern_malloc.c (129906)
 /*
  * Copyright (c) 1987, 1991, 1993
  * The Regents of the University of California. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 16 unchanged lines hidden ---

  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_malloc.c 127911 2004-04-05 21:03:37Z imp $");
+__FBSDID("$FreeBSD: head/sys/kern/kern_malloc.c 129906 2004-05-31 21:46:06Z bmilekic $");

 #include "opt_vm.h"

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/malloc.h>

--- 144 unchanged lines hidden ---

 malloc(size, type, flags)
         unsigned long size;
         struct malloc_type *type;
         int flags;
 {
         int indx;
         caddr_t va;
         uma_zone_t zone;
+        uma_keg_t keg;
 #ifdef DIAGNOSTIC
         unsigned long osize = size;
 #endif
         register struct malloc_type *ksp = type;

 #ifdef INVARIANTS
         /*
          * To make sure that WAITOK or NOWAIT is set, but not more than

--- 28 unchanged lines hidden ---

         if (flags & M_WAITOK)
                 KASSERT(curthread->td_intr_nesting_level == 0,
                     ("malloc(M_WAITOK) in interrupt context"));
         if (size <= KMEM_ZMAX) {
                 if (size & KMEM_ZMASK)
                         size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
                 indx = kmemsize[size >> KMEM_ZSHIFT];
                 zone = kmemzones[indx].kz_zone;
+                keg = zone->uz_keg;
 #ifdef MALLOC_PROFILE
                 krequests[size >> KMEM_ZSHIFT]++;
 #endif
                 va = uma_zalloc(zone, flags);
                 mtx_lock(&ksp->ks_mtx);
                 if (va == NULL)
                         goto out;

                 ksp->ks_size |= 1 << indx;
-                size = zone->uz_size;
+                size = keg->uk_size;
         } else {
                 size = roundup(size, PAGE_SIZE);
                 zone = NULL;
+                keg = NULL;
                 va = uma_large_malloc(size, flags);
                 mtx_lock(&ksp->ks_mtx);
                 if (va == NULL)
                         goto out;
         }
         ksp->ks_memuse += size;
         ksp->ks_inuse++;
 out:

--- 45 unchanged lines hidden ---
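
The change running through this diff is the UMA zone/keg split: per-zone state such as constructors, destructors, and the per-CPU caches stays in uma_zone, while slab bookkeeping, including the item size, moves into a backing uma_keg that several zones may share. That is why malloc() above now reads keg->uk_size where it used to read zone->uz_size, and why the free() hunk below recovers a zone from the slab via LIST_FIRST(&slab->us_keg->uk_zones) instead of a direct us_zone pointer. A minimal sketch of the relationship; only uz_keg, uk_size, uk_zones, and us_keg actually appear in this diff, everything else is illustrative and not the real vm/uma_int.h layout:

/* Illustrative sketch only; see vm/uma_int.h for the real structures. */
#include <sys/queue.h>

struct uma_keg;

struct uma_zone {
        struct uma_keg *uz_keg;         /* backing keg holding the slabs */
        LIST_ENTRY(uma_zone) uz_link;   /* linkage in uz_keg->uk_zones */
        /* ctor/dtor pointers and per-CPU caches elided */
};

struct uma_keg {
        unsigned long uk_size;          /* item size, formerly uz_size */
        LIST_HEAD(, uma_zone) uk_zones; /* all zones backed by this keg */
        /* slab lists and back-end allocation policy elided */
};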

                 panic("free: address %p(%p) has not been allocated.\n",
                     addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));


         if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
 #ifdef INVARIANTS
                 struct malloc_type **mtp = addr;
 #endif
-                size = slab->us_zone->uz_size;
+                size = slab->us_keg->uk_size;
 #ifdef INVARIANTS
                 /*
                  * Cache a pointer to the malloc_type that most recently freed
                  * this memory here. This way we know who is most likely to
                  * have stepped on it later.
                  *
                  * This code assumes that size is a multiple of 8 bytes for
                  * 64 bit machines
                  */
                 mtp = (struct malloc_type **)
                     ((unsigned long)mtp & ~UMA_ALIGN_PTR);
                 mtp += (size - sizeof(struct malloc_type *)) /
                     sizeof(struct malloc_type *);
                 *mtp = type;
 #endif
-                uma_zfree_arg(slab->us_zone, addr, slab);
+                uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
         } else {
                 size = slab->us_size;
                 uma_large_free(slab);
         }
         mtx_lock(&ksp->ks_mtx);
         KASSERT(size <= ksp->ks_memuse,
             ("malloc(9)/free(9) confusion.\n%s",
             "Probably freeing with wrong type, but maybe not here."));

--- 22 unchanged lines hidden ---
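
The INVARIANTS block in free() above stashes the pointer of the malloc_type doing the free into the last pointer-aligned slot of the block, so that if the memory is later written through a stale pointer, the stored value names the most recent freer. A user-space analogue of the same trick; mark_last_freer and its parameters are hypothetical, not kernel API:

#include <assert.h>
#include <stddef.h>

static void
mark_last_freer(void *block, size_t size, void *owner)
{
        void **slot;

        /*
         * Mirrors the original comment's assumption: size must be a
         * multiple of the pointer size for the final slot to be aligned.
         */
        assert(size >= sizeof(void *) && size % sizeof(void *) == 0);
        slot = (void **)((char *)block + size - sizeof(void *));
        *slot = owner;  /* later corruption of *slot implicates owner */
}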

         slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

         /* Sanity check */
         KASSERT(slab != NULL,
             ("realloc: address %p out of range", (void *)addr));

         /* Get the size of the original block */
-        if (slab->us_zone)
-                alloc = slab->us_zone->uz_size;
+        if (slab->us_keg)
+                alloc = slab->us_keg->uk_size;
         else
                 alloc = slab->us_size;

         /* Reuse the original block if appropriate */
         if (size <= alloc
             && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
                 return (addr);

--- 28 unchanged lines hidden ---
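
For reference, the reuse test in realloc() above keeps the old block only when the requested size still fits and does not waste too much of it: REALLOC_FRACTION defaults to 1 in this file, so a shrinking request must still exceed half the block, with an escape hatch for blocks already at MINALLOCSIZE that cannot get smaller. A standalone sketch of the same predicate; would_reuse is a hypothetical helper, and 16 is only an illustrative MINALLOCSIZE:

/* Reuse check with REALLOC_FRACTION assumed at its default of 1. */
static int
would_reuse(unsigned long size, unsigned long alloc, unsigned long minalloc)
{
        return (size <= alloc &&
            (size > (alloc >> 1) || alloc == minalloc));
}
/*
 * would_reuse(100, 128, 16) == 1: keep the 128-byte block.
 * would_reuse(48, 128, 16) == 0: reallocate to avoid waste.
 */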

  * Initialize the kernel memory allocator
  */
 /* ARGSUSED*/
 static void
 kmeminit(dummy)
         void *dummy;
 {
         u_int8_t indx;
-        u_long npg;
         u_long mem_size;
         int i;

         mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

         /*
          * Try to auto-tune the kernel memory size, so that it is
          * more applicable for a wider range of machine sizes.
          * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
          * a VM_KMEM_SIZE of 12MB is a fair compromise. The
          * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
          * available, and on an X86 with a total KVA space of 256MB,
          * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
          *
          * Note that the kmem_map is also used by the zone allocator,
          * so make sure that there is enough space.
          */
-        vm_kmem_size = VM_KMEM_SIZE;
+        vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
         mem_size = cnt.v_page_count;

 #if defined(VM_KMEM_SIZE_SCALE)
         if ((mem_size / VM_KMEM_SIZE_SCALE) > (vm_kmem_size / PAGE_SIZE))
                 vm_kmem_size = (mem_size / VM_KMEM_SIZE_SCALE) * PAGE_SIZE;
 #endif

 #if defined(VM_KMEM_SIZE_MAX)

--- 17 unchanged lines hidden ---

         if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
                 vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

         /*
          * Tune settings based on the kernel map's size at this time.
          */
         init_param3(vm_kmem_size / PAGE_SIZE);

-        /*
-         * In mbuf_init(), we set up submaps for mbufs and clusters, in which
-         * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
-         * respectively. Mathematically, this means that what we do here may
-         * amount to slightly more address space than we need for the submaps,
-         * but it never hurts to have an extra page in kmem_map.
-         */
-        npg = (nmbufs*MSIZE + nmbclusters*MCLBYTES + vm_kmem_size) / PAGE_SIZE;
-
         kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
-            (vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
+            (vm_offset_t *)&kmemlimit, vm_kmem_size);
         kmem_map->system_map = 1;

         uma_startup2();

         for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
                 int size = kmemzones[indx].kz_size;
                 char *name = kmemzones[indx].kz_name;

--- 204 unchanged lines hidden ---
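
The kmeminit() changes above are consistent with mbuf and cluster allocation moving into UMA out of kmem_map: the dedicated submap accounting via npg disappears, kmem_map is carved out at exactly vm_kmem_size, and vm_kmem_size is padded up front with one page per potential cluster. A rough before/after of the sizing arithmetic; every constant below is an illustrative i386-era value, not something taken from the diff:

#include <stdio.h>

int
main(void)
{
        unsigned long page_size = 4096, msize = 256, mclbytes = 2048;
        unsigned long nmbufs = 4096, nmbclusters = 1024;
        unsigned long base = 12UL << 20;        /* VM_KMEM_SIZE, 12MB */

        /* Old sizing: mbuf/cluster submap space added on top of base. */
        unsigned long npg = (nmbufs * msize + nmbclusters * mclbytes +
            base) / page_size;
        /* New sizing: one page per potential cluster, folded in. */
        unsigned long vm_kmem_size = base + nmbclusters * page_size;

        printf("old kmem_map: %lu bytes\n", npg * page_size);
        printf("new kmem_map: %lu bytes\n", vm_kmem_size);
        return (0);
}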