--- huge.c	(296221)
+++ huge.c	(299587)
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static extent_node_t *
huge_node_get(const void *ptr)
{
	extent_node_t *node;

	node = chunk_lookup(ptr, true);
	assert(!extent_node_achunk_get(node));

	return (node);
}

static bool
-huge_node_set(const void *ptr, extent_node_t *node)
+huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{

	assert(extent_node_addr_get(node) == ptr);
	assert(!extent_node_achunk_get(node));
-	return (chunk_register(ptr, node));
+	return (chunk_register(tsdn, ptr, node));
}

static void
+huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+{
+	bool err;
+
+	err = huge_node_set(tsdn, ptr, node);
+	assert(!err);
+}
+
+static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{

	chunk_deregister(ptr, node);
}

void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
-    tcache_t *tcache)
+huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{

	assert(usize == s2u(usize));

-	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
+	return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}

void *
-huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
-    bool zero, tcache_t *tcache)
+huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+    bool zero)
{
	void *ret;
	size_t ausize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

+	assert(!tsdn_null(tsdn) || arena != NULL);
+
	ausize = sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent node with which to track the chunk. */
-	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
-	    CACHELINE, false, tcache, true, arena);
+	node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
+	    CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed = zero;
-	arena = arena_choose(tsd, arena);
-	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
-	    usize, alignment, &is_zeroed)) == NULL) {
-		idalloctm(tsd, node, tcache, true, true);
+	if (likely(!tsdn_null(tsdn)))
+		arena = arena_choose(tsdn_tsd(tsdn), arena);
+	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
+	    arena, usize, alignment, &is_zeroed)) == NULL) {
+		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	extent_node_init(node, arena, ret, usize, is_zeroed, true);

-	if (huge_node_set(ret, node)) {
-		arena_chunk_dalloc_huge(arena, ret, usize);
-		idalloctm(tsd, node, tcache, true, true);
+	if (huge_node_set(tsdn, ret, node)) {
+		arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
+		idalloctm(tsdn, node, NULL, true, true);
		return (NULL);
	}

	/* Insert node into huge. */
-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->huge, node, ql_link);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
-		memset(ret, 0xa5, usize);
+		memset(ret, JEMALLOC_ALLOC_JUNK, usize);

-	arena_decay_tick(tsd, arena);
+	arena_decay_tick(tsdn, arena);
	return (ret);
}

#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
-huge_dalloc_junk(void *ptr, size_t usize)
+huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
{

	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
-		if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
-			memset(ptr, 0x5a, usize);
+		if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
+			memset(ptr, JEMALLOC_FREE_JUNK, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

static void
-huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
-    size_t usize_max, bool zero)
+huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize_min, size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

	/* Increase usize to incorporate extra. */

[... 7 unchanged lines hidden ...]

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
-			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
+			memset((void *)((uintptr_t)ptr + usize),
+			    JEMALLOC_FREE_JUNK, sdiff);
			post_zeroed = false;
		} else {
-			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
-			    ptr, CHUNK_CEILING(oldsize), usize, sdiff);
+			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+			    &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
+			    sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
+	huge_node_unset(ptr, node);
	assert(extent_node_size_get(node) != usize);
	extent_node_size_set(node, usize);
+	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

-	arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
+	arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				    usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
-			memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
-			    oldsize);
+			memset((void *)((uintptr_t)ptr + oldsize),
+			    JEMALLOC_ALLOC_JUNK, usize - oldsize);
		}
	}
}

static bool
-huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
+huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize)
{
	extent_node_t *node;
	arena_t *arena;
	chunk_hooks_t chunk_hooks;
	size_t cdiff;
	bool pre_zeroed, post_zeroed;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	pre_zeroed = extent_node_zeroed_get(node);
-	chunk_hooks = chunk_hooks_get(arena);
+	chunk_hooks = chunk_hooks_get(tsdn, arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
-			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+			huge_dalloc_junk(tsdn, (void *)((uintptr_t)ptr + usize),
			    sdiff);
			post_zeroed = false;
		} else {
-			post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
-			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
-			    CHUNK_CEILING(oldsize),
+			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+			    &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
+			    usize), CHUNK_CEILING(oldsize),
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
		}
	} else
		post_zeroed = pre_zeroed;

-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
+	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
+	huge_node_reset(tsdn, ptr, node);
	/* Update zeroed. */
	extent_node_zeroed_set(node, post_zeroed);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/* Zap the excess chunks. */
-	arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
+	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);

	return (false);
}

static bool
-huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
+huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize, bool zero) {
	extent_node_t *node;
	arena_t *arena;
	bool is_zeroed_subchunk, is_zeroed_chunk;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	is_zeroed_subchunk = extent_node_zeroed_get(node);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	/*
	 * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
	 * that it is possible to make correct junk/zero fill decisions below.
	 */
	is_zeroed_chunk = zero;

-	if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
+	if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
	    &is_zeroed_chunk))
		return (true);

-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	/* Update the size of the huge allocation. */
+	huge_node_unset(ptr, node);
	extent_node_size_set(node, usize);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	huge_node_reset(tsdn, ptr, node);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			    CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
-		memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
-		    oldsize);
+		memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
+		    usize - oldsize);
	}

	return (false);
}

bool
-huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

	/* Both allocations must be huge to avoid a move. */
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
-		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
+		if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
		    zero)) {
-			arena_decay_tick(tsd, huge_aalloc(ptr));
+			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
-		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
-		    oldsize, usize_min, zero)) {
-			arena_decay_tick(tsd, huge_aalloc(ptr));
+		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
+		    ptr, oldsize, usize_min, zero)) {
+			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}

	/*
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
-		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
-		    zero);
-		arena_decay_tick(tsd, huge_aalloc(ptr));
+		huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
+		    usize_max, zero);
+		arena_decay_tick(tsdn, huge_aalloc(ptr));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
-		if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
-			arena_decay_tick(tsd, huge_aalloc(ptr));
+		if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
+		    usize_max)) {
+			arena_decay_tick(tsdn, huge_aalloc(ptr));
			return (false);
		}
	}
	return (true);
}

static void *
-huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+    size_t alignment, bool zero)
{

	if (alignment <= chunksize)
-		return (huge_malloc(tsd, arena, usize, zero, tcache));
-	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
+		return (huge_malloc(tsdn, arena, usize, zero));
+	return (huge_palloc(tsdn, arena, usize, alignment, zero));
}

void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);

	/* Try to avoid moving the allocation. */
-	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
+	if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
+	    zero))
		return (ptr);

	/*
	 * usize and oldsize are different enough that we need to use a
	 * different size class. In that case, fall back to allocating new
	 * space and copying.
	 */
-	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
-	    tcache);
+	ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
+	    zero);
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
-	isqalloc(tsd, ptr, oldsize, tcache);
+	isqalloc(tsd, ptr, oldsize, tcache, true);
	return (ret);
}

void
-huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
+huge_dalloc(tsdn_t *tsdn, void *ptr)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	huge_node_unset(ptr, node);
-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	ql_remove(&arena->huge, node, ql_link);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

-	huge_dalloc_junk(extent_node_addr_get(node),
+	huge_dalloc_junk(tsdn, extent_node_addr_get(node),
	    extent_node_size_get(node));
-	arena_chunk_dalloc_huge(extent_node_arena_get(node),
+	arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
	    extent_node_addr_get(node), extent_node_size_get(node));
-	idalloctm(tsd, node, tcache, true, true);
+	idalloctm(tsdn, node, NULL, true, true);

-	arena_decay_tick(tsd, arena);
+	arena_decay_tick(tsdn, arena);
}

arena_t *
huge_aalloc(const void *ptr)
{

	return (extent_node_arena_get(huge_node_get(ptr)));
}

size_t
-huge_salloc(const void *ptr)
+huge_salloc(tsdn_t *tsdn, const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	size = extent_node_size_get(node);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (size);
}

prof_tctx_t *
-huge_prof_tctx_get(const void *ptr)
+huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
	prof_tctx_t *tctx;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	tctx = extent_node_prof_tctx_get(node);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);

	return (tctx);
}

void
-huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
-	malloc_mutex_lock(&arena->huge_mtx);
+	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_node_prof_tctx_set(node, tctx);
-	malloc_mutex_unlock(&arena->huge_mtx);
+	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}

void
-huge_prof_tctx_reset(const void *ptr)
+huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{

-	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+	huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}
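
For orientation, the sketch below shows how a jemalloc-internal caller might adapt to the tsdn_t-based signatures on the 299587 side of this diff. The wrapper huge_alloc_example() and its use of a NULL arena are illustrative assumptions, not code from huge.c; it uses only interfaces that appear in the diff (tsd_tsdn(), s2u(), HUGE_MAXCLASS, huge_malloc()), and it assumes the jemalloc-internal build environment rather than being a standalone program.

/*
 * Illustrative sketch only: callers that held a tsd_t and a tcache now wrap
 * the tsd as a (nullable) tsdn_t and drop the tcache argument; the huge_*
 * functions pick defaults internally.
 */
#include "jemalloc/internal/jemalloc_internal.h"

static void *
huge_alloc_example(tsd_t *tsd, size_t size)
{
	size_t usize;

	/* huge_malloc() asserts usize == s2u(usize), so round up first. */
	usize = s2u(size);
	if (usize == 0 || usize > HUGE_MAXCLASS)
		return (NULL);

	/* Old interface: huge_malloc(tsd, NULL, usize, false, tcache). */
	/* New interface: no tcache; pass a tsdn_t derived from the tsd. */
	return (huge_malloc(tsd_tsdn(tsd), NULL, usize, false));
}

The deallocation side follows the same pattern: the old huge_dalloc(tsd, ptr, tcache) call becomes huge_dalloc(tsd_tsdn(tsd), ptr) under the new interface.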