#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes; /* LIFO cache of freed extent nodes. */

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool base_pages_alloc(size_t minsize);

/******************************************************************************/

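/*
 * Grow the set of base pages by at least minsize bytes, rounded up to a
 * multiple of the chunk size (so with the default 4 MiB chunksize, even a
 * small shortfall maps an entire new chunk).  Base pages are never returned
 * to the system; they back internal metadata for the life of the process.
 */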
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	/* Honor the current dss precedence when mapping new chunks. */
	base_pages = chunk_alloc(csize, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/*
	 * Round size up to the nearest multiple of the cacheline size, so
	 * that consecutive allocations never share a cache line (e.g. with
	 * a 64-byte cacheline, CACHELINE_CEILING(1) == 64).
	 */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate by bumping base_next_addr. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}

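/*
 * Usage sketch (hypothetical caller; foo_t is illustrative, not part of
 * this file): base_alloc() backs long-lived internal metadata, and there
 * is no corresponding deallocation routine:
 *
 *	foo_t *foo = (foo_t *)base_alloc(sizeof(foo_t));
 *	if (foo == NULL)
 *		return (true);	(propagate OOM; base memory is never freed)
 */
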
void *
base_calloc(size_t number, size_t size)
{
	/*
	 * Callers are trusted internal code; number * size is assumed not
	 * to overflow.
	 */
	void *ret = base_alloc(number * size);

	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}

extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		/* Pop a cached node; its first word links to the next. */
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	/* Push node onto the cache, reusing its first word as the link. */
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

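/*
 * Sketch of the node cache: freed extent_node_t objects form a LIFO singly
 * linked list threaded through their own first words, so no separate link
 * field is needed (cacheline rounding in base_alloc() guarantees room for a
 * pointer):
 *
 *	base_nodes -> [node A] -> [node B] -> NULL
 *
 * base_node_dealloc() pushes onto this list; base_node_alloc() pops from it,
 * falling back to base_alloc() when the cache is empty.
 */
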
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}

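/*
 * Fork protocol: base_prefork() acquires base_mtx so that no thread holds
 * it across fork(); the postfork hooks then release it in the parent and
 * restore it to a usable state in the child.
 */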
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}