kern_malloc.c (253859, deleted) vs. kern_malloc.c (254025, added)
1/*-
2 * Copyright (c) 1987, 1991, 1993
3 * The Regents of the University of California.
4 * Copyright (c) 2005-2009 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 29 unchanged lines hidden ---

38 * and a special UMA allocation interface is used for larger allocations.
39 * Callers declare memory types, and statistics are maintained independently
40 * for each memory type. Statistics are maintained per-CPU for performance
41 * reasons. See malloc(9) and comments in malloc.h for a detailed
42 * description.
43 */
44
45#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 1987, 1991, 1993
3 * The Regents of the University of California.
4 * Copyright (c) 2005-2009 Robert N. M. Watson
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 29 unchanged lines hidden ---

38 * and a special UMA allocation interface is used for larger allocations.
39 * Callers declare memory types, and statistics are maintained independently
40 * for each memory type. Statistics are maintained per-CPU for performance
41 * reasons. See malloc(9) and comments in malloc.h for a detailed
42 * description.
43 */
44
45#include <sys/cdefs.h>
46__FBSDID("$FreeBSD: head/sys/kern/kern_malloc.c 253859 2013-08-01 12:55:41Z kib $");
46__FBSDID("$FreeBSD: head/sys/kern/kern_malloc.c 254025 2013-08-07 06:21:20Z jeff $");
47
48#include "opt_ddb.h"
49#include "opt_kdtrace.h"
50#include "opt_vm.h"
51
52#include <sys/param.h>
53#include <sys/systm.h>
54#include <sys/kdb.h>
55#include <sys/kernel.h>
56#include <sys/lock.h>
57#include <sys/malloc.h>
58#include <sys/mbuf.h>
59#include <sys/mutex.h>
60#include <sys/vmmeter.h>
61#include <sys/proc.h>
62#include <sys/sbuf.h>
63#include <sys/sysctl.h>
64#include <sys/time.h>
47
48#include "opt_ddb.h"
49#include "opt_kdtrace.h"
50#include "opt_vm.h"
51
52#include <sys/param.h>
53#include <sys/systm.h>
54#include <sys/kdb.h>
55#include <sys/kernel.h>
56#include <sys/lock.h>
57#include <sys/malloc.h>
58#include <sys/mbuf.h>
59#include <sys/mutex.h>
60#include <sys/vmmeter.h>
61#include <sys/proc.h>
62#include <sys/sbuf.h>
63#include <sys/sysctl.h>
64#include <sys/time.h>
65#include <sys/vmem.h>
65
66#include <vm/vm.h>
67#include <vm/pmap.h>
66
67#include <vm/vm.h>
68#include <vm/pmap.h>
69#include <vm/vm_pageout.h>
68#include <vm/vm_param.h>
69#include <vm/vm_kern.h>
70#include <vm/vm_extern.h>
71#include <vm/vm_map.h>
72#include <vm/vm_page.h>
73#include <vm/uma.h>
74#include <vm/uma_int.h>
75#include <vm/uma_dbg.h>

--- 32 unchanged lines hidden ---

108 */
109MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
110MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
111MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
112
113MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
114MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
115
70#include <vm/vm_param.h>
71#include <vm/vm_kern.h>
72#include <vm/vm_extern.h>
73#include <vm/vm_map.h>
74#include <vm/vm_page.h>
75#include <vm/uma.h>
76#include <vm/uma_int.h>
77#include <vm/uma_dbg.h>

--- 32 unchanged lines hidden ---

110 */
111MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
112MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
113MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
114
115MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
116MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
117
116static void kmeminit(void *);
117SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);
118
119static struct malloc_type *kmemstatistics;
118static struct malloc_type *kmemstatistics;
120static vm_offset_t kmembase;
121static vm_offset_t kmemlimit;
122static int kmemcount;
123
124#define KMEM_ZSHIFT 4
125#define KMEM_ZBASE 16
126#define KMEM_ZMASK (KMEM_ZBASE - 1)
127
128#define KMEM_ZMAX PAGE_SIZE
129#define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)

--- 68 unchanged lines hidden ---

198
199static u_int vm_kmem_size_scale;
200SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
201 "Scale factor for kernel memory size");
202
203static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
204SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
205 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
119static int kmemcount;
120
121#define KMEM_ZSHIFT 4
122#define KMEM_ZBASE 16
123#define KMEM_ZMASK (KMEM_ZBASE - 1)
124
125#define KMEM_ZMAX PAGE_SIZE
126#define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT)

--- 68 unchanged lines hidden ---

195
196static u_int vm_kmem_size_scale;
197SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
198 "Scale factor for kernel memory size");
199
200static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
201SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
202 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
206 sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");
203 sysctl_kmem_map_size, "LU", "Current kmem allocation size");
207
208static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
209SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
210 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
204
205static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
206SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
207 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
211 sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");
208 sysctl_kmem_map_free, "LU", "Free space in kmem");
212
213/*
214 * The malloc_mtx protects the kmemstatistics linked list.
215 */
216struct mtx malloc_mtx;
217
218#ifdef MALLOC_PROFILE
219uint64_t krequests[KMEM_ZSIZE + 1];

--- 28 unchanged lines hidden (view full) ---

248 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
249#endif
250
251static int
252sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
253{
254 u_long size;
255
209
210/*
211 * The malloc_mtx protects the kmemstatistics linked list.
212 */
213struct mtx malloc_mtx;
214
215#ifdef MALLOC_PROFILE
216uint64_t krequests[KMEM_ZSIZE + 1];

--- 28 unchanged lines hidden ---

245 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
246#endif
247
248static int
249sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
250{
251 u_long size;
252
256 size = kmem_map->size;
253 size = vmem_size(kmem_arena, VMEM_ALLOC);
257 return (sysctl_handle_long(oidp, &size, 0, req));
258}
259
260static int
261sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
262{
263 u_long size;
264
254 return (sysctl_handle_long(oidp, &size, 0, req));
255}
256
257static int
258sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
259{
260 u_long size;
261
265 vm_map_lock_read(kmem_map);
266 size = kmem_map->root != NULL ? kmem_map->root->max_free :
267 kmem_map->max_offset - kmem_map->min_offset;
268 vm_map_unlock_read(kmem_map);
262 size = vmem_size(kmem_arena, VMEM_FREE);
269 return (sysctl_handle_long(oidp, &size, 0, req));
270}
271
272/*
273 * malloc(9) uma zone separation -- sub-page buffer overruns in one
274 * malloc type will affect only a subset of other malloc types.
275 */
276#if MALLOC_DEBUG_MAXZONES > 1

--- 138 unchanged lines hidden ---

415 */
416void *
417contigmalloc(unsigned long size, struct malloc_type *type, int flags,
418 vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
419 vm_paddr_t boundary)
420{
421 void *ret;
422
263 return (sysctl_handle_long(oidp, &size, 0, req));
264}
265
266/*
267 * malloc(9) uma zone separation -- sub-page buffer overruns in one
268 * malloc type will affect only a subset of other malloc types.
269 */
270#if MALLOC_DEBUG_MAXZONES > 1

--- 138 unchanged lines hidden ---

409 */
410void *
411contigmalloc(unsigned long size, struct malloc_type *type, int flags,
412 vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
413 vm_paddr_t boundary)
414{
415 void *ret;
416
423 ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
417 ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
424 alignment, boundary, VM_MEMATTR_DEFAULT);
425 if (ret != NULL)
426 malloc_type_allocated(type, round_page(size));
427 return (ret);
428}
429
430/*
431 * contigfree:
432 *
433 * Free a block of memory allocated by contigmalloc.
434 *
435 * This routine may not block.
436 */
437void
438contigfree(void *addr, unsigned long size, struct malloc_type *type)
439{
440
418 alignment, boundary, VM_MEMATTR_DEFAULT);
419 if (ret != NULL)
420 malloc_type_allocated(type, round_page(size));
421 return (ret);
422}
423
424/*
425 * contigfree:
426 *
427 * Free a block of memory allocated by contigmalloc.
428 *
429 * This routine may not block.
430 */
431void
432contigfree(void *addr, unsigned long size, struct malloc_type *type)
433{
434
441 kmem_free(kernel_map, (vm_offset_t)addr, size);
435 kmem_free(kernel_arena, (vm_offset_t)addr, size);
442 malloc_type_freed(type, round_page(size));
443}
444
445/*
446 * malloc:
447 *
448 * Allocate a block of memory.
449 *

--- 226 unchanged lines hidden ---

676 void *mem;
677
678 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
679 free(addr, mtp);
680 return (mem);
681}
682
683/*
436 malloc_type_freed(type, round_page(size));
437}
438
439/*
440 * malloc:
441 *
442 * Allocate a block of memory.
443 *

--- 226 unchanged lines hidden (view full) ---

670 void *mem;
671
672 if ((mem = realloc(addr, size, mtp, flags)) == NULL)
673 free(addr, mtp);
674 return (mem);
675}
676
677/*
684 * Initialize the kernel memory allocator
678 * Wake the page daemon when we exhaust KVA. It will call the lowmem handler
679 * and uma_reclaim() callbacks in a context that is safe.
685 */
680 */
686/* ARGSUSED*/
687static void
681static void
688kmeminit(void *dummy)
682kmem_reclaim(vmem_t *vm, int flags)
689{
683{
690 uint8_t indx;
684
685 pagedaemon_wakeup();
686}
687
688/*
689 * Initialize the kernel memory arena.
690 */
691void
692kmeminit(void)
693{
691 u_long mem_size, tmp;
694 u_long mem_size, tmp;
692 int i;
693
695
694 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
695
696 /*
697 * Try to auto-tune the kernel memory size, so that it is
698 * more applicable for a wider range of machine sizes. The
699 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
700 * available.
701 *
702 * Note that the kmem_map is also used by the zone allocator,
703 * so make sure that there is enough space.

--- 36 unchanged lines hidden ---

740 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
741 vm_kmem_size = 2 * mem_size * PAGE_SIZE;
742
743#ifdef DEBUG_MEMGUARD
744 tmp = memguard_fudge(vm_kmem_size, kernel_map);
745#else
746 tmp = vm_kmem_size;
747#endif
696 /*
697 * Try to auto-tune the kernel memory size, so that it is
698 * more applicable for a wider range of machine sizes. The
699 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
700 * available.
701 *
702 * Note that the kmem_map is also used by the zone allocator,
703 * so make sure that there is enough space.

--- 36 unchanged lines hidden ---

740 if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
741 vm_kmem_size = 2 * mem_size * PAGE_SIZE;
742
743#ifdef DEBUG_MEMGUARD
744 tmp = memguard_fudge(vm_kmem_size, kernel_map);
745#else
746 tmp = vm_kmem_size;
747#endif
748 kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
749 tmp, TRUE);
750 kmem_map->system_map = 1;
748 vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
749 PAGE_SIZE * 16, 0);
750 vmem_set_reclaim(kmem_arena, kmem_reclaim);
751
752#ifdef DEBUG_MEMGUARD
753 /*
754 * Initialize MemGuard if support compiled in. MemGuard is a
755 * replacement allocator used for detecting tamper-after-free
756 * scenarios as they occur. It is only used for debugging.
757 */
751
752#ifdef DEBUG_MEMGUARD
753 /*
754 * Initialize MemGuard if support compiled in. MemGuard is a
755 * replacement allocator used for detecting tamper-after-free
756 * scenarios as they occur. It is only used for debugging.
757 */
758 memguard_init(kmem_map);
758 memguard_init(kmem_arena);
759#endif
759#endif
760}
760
761
762/*
763 * Initialize the kernel memory allocator
764 */
765/* ARGSUSED*/
766static void
767mallocinit(void *dummy)
768{
769 int i;
770 uint8_t indx;
771
772 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
773
774 kmeminit();
775
761 uma_startup2();
762
763 mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
764#ifdef INVARIANTS
765 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
766#else
767 NULL, NULL, NULL, NULL,
768#endif

--- 13 unchanged lines hidden ---

782#endif
783 UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
784 }
785 for (;i <= size; i+= KMEM_ZBASE)
786 kmemsize[i >> KMEM_ZSHIFT] = indx;
787
788 }
789}
776 uma_startup2();
777
778 mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
779#ifdef INVARIANTS
780 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
781#else
782 NULL, NULL, NULL, NULL,
783#endif

--- 13 unchanged lines hidden ---

797#endif
798 UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
799 }
800 for (;i <= size; i+= KMEM_ZBASE)
801 kmemsize[i >> KMEM_ZSHIFT] = indx;
802
803 }
804}
805SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, mallocinit, NULL);
790
791void
792malloc_init(void *data)
793{
794 struct malloc_type_internal *mtip;
795 struct malloc_type *mtp;
796
797 KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

--- 286 unchanged lines hidden ---
806
807void
808malloc_init(void *data)
809{
810 struct malloc_type_internal *mtip;
811 struct malloc_type *mtp;
812
813 KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

--- 286 unchanged lines hidden ---