/* retained.c, revision 1.1.1.1 */
#include "test/jemalloc_test.h"

#include "jemalloc/internal/spin.h"

static unsigned		arena_ind;
static size_t		sz;
static size_t		esz;
#define NEPOCHS		8
#define PER_THD_NALLOCS	1
static atomic_u_t	epoch;
static atomic_u_t	nfinished;

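/*
 * Create a new arena via the "arenas.create" mallctl, optionally installing
 * custom extent hooks, and return its index.
 */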
static unsigned
do_arena_create(extent_hooks_t *h) {
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
	    "Unexpected mallctl() failure");
	return arena_ind;
}

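/* Destroy the given arena via the "arena.<i>.destroy" mallctl. */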
static void
do_arena_destroy(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen;

	miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}

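/* Advance the "epoch" mallctl so that subsequent stats reads are current. */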
static void
do_refresh(void) {
	uint64_t epoch = 1;
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(epoch)), 0, "Unexpected mallctl() failure");
}

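/*
 * Read a size_t statistic named by cmd ("stats.arenas.0.<stat>"), patching
 * the requested arena index into the translated MIB.
 */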
static size_t
do_get_size_impl(const char *cmd, unsigned arena_ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t z = sizeof(size_t);

	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = arena_ind;
	size_t size;
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);

	return size;
}

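/* "stats.arenas.<i>.pactive" is a page count, so scale it to bytes. */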
static size_t
do_get_active(unsigned arena_ind) {
	return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE;
}

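/* "stats.arenas.<i>.mapped" is already reported in bytes. */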
static size_t
do_get_mapped(unsigned arena_ind) {
	return do_get_size_impl("stats.arenas.0.mapped", arena_ind);
}

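/*
 * Worker thread: for each epoch published by the main thread, perform
 * PER_THD_NALLOCS allocations from the test arena (bypassing the tcache so
 * every request reaches the arena), then report completion via nfinished.
 */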
static void *
thd_start(void *arg) {
	for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
		/* Busy-wait for next epoch. */
		unsigned cur_epoch;
		spin_t spinner = SPIN_INITIALIZER;
		while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
		    next_epoch) {
			spin_adaptive(&spinner);
		}
		assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch");

		/*
		 * Allocate.  The main thread will reset the arena, so there's
		 * no need to deallocate.
		 */
		for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
			void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p,
			    "Unexpected mallocx() failure\n");
		}

		/* Let the main thread know we've finished this iteration. */
		atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
	}

	return NULL;
}

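/*
 * Exercise extent retention: worker threads repeatedly allocate
 * HUGEPAGE-sized objects from a dedicated arena while the main thread
 * verifies, after each round, that the arena's active, mapped, and retained
 * accounting stays within the bounds implied by its extent growth.
 */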
TEST_BEGIN(test_retained) {
	test_skip_if(!config_stats);

	arena_ind = do_arena_create(NULL);
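	/*
	 * sz is the usable size of a HUGEPAGE-sized request; esz adds the
	 * large-allocation padding, giving the extent space each allocation
	 * actually consumes.
	 */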
	sz = nallocx(HUGEPAGE, 0);
	esz = sz + sz_large_pad;

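	/*
	 * Workers busy-wait for epoch 1, so park them at epoch 0 until the
	 * loop below publishes it.
	 */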
	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

	unsigned nthreads = ncpus * 2;
	VARIABLE_ARRAY(thd_t, threads, nthreads);
	for (unsigned i = 0; i < nthreads; i++) {
		thd_create(&threads[i], thd_start, NULL);
	}

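	/*
	 * Drive the workers through NEPOCHS - 1 rounds: publish the next
	 * epoch, wait for every worker to allocate, then validate the arena's
	 * accounting before resetting it.
	 */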
	for (unsigned e = 1; e < NEPOCHS; e++) {
		atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
		atomic_store_u(&epoch, e, ATOMIC_RELEASE);

		/* Wait for threads to finish allocating. */
		spin_t spinner = SPIN_INITIALIZER;
		while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
			spin_adaptive(&spinner);
		}

		/*
		 * Assert that retained memory is no larger than the sum of
		 * the size classes that should have been used to satisfy the
		 * worker threads' requests, discounting per-growth
		 * fragmentation.
		 */
		do_refresh();

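		/*
		 * Each worker allocation consumes esz bytes of extent space,
		 * all of which must be active, and everything active must in
		 * turn be mapped.
		 */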
		size_t allocated = esz * nthreads * PER_THD_NALLOCS;
		size_t active = do_get_active(arena_ind);
		assert_zu_le(allocated, active, "Unexpected active memory");
		size_t mapped = do_get_mapped(arena_ind);
		assert_zu_le(active, mapped, "Unexpected mapped memory");

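		/*
		 * extent_grow_next records the next size class the arena
		 * would use when growing its retained mapping; the classes
		 * from HUGEPAGE up to it bound what growth can have supplied,
		 * split here into the portion usable for esz-sized requests
		 * and the leftover fragmentation.
		 */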
		arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
		size_t usable = 0;
		size_t fragmented = 0;
		for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
		    arena->extent_grow_next; pind++) {
			size_t psz = sz_pind2sz(pind);
			size_t psz_fragmented = psz % esz;
			size_t psz_usable = psz - psz_fragmented;
			/*
			 * Only consider size classes that wouldn't be skipped.
			 */
			if (psz_usable > 0) {
				assert_zu_lt(usable, allocated,
				    "Excessive retained memory "
				    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
				    allocated);
				fragmented += psz_fragmented;
				usable += psz_usable;
			}
		}

		/*
		 * Clean up the arena.  Destroying and recreating the arena is
		 * simpler than specifying extent hooks that deallocate
		 * (rather than retain) during reset.
		 */
		do_arena_destroy(arena_ind);
		assert_u_eq(do_arena_create(NULL), arena_ind,
		    "Unexpected arena index");
	}

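	/* All epochs have been published, so the workers have exited. */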
	for (unsigned i = 0; i < nthreads; i++) {
		thd_join(threads[i], NULL);
	}

	do_arena_destroy(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_retained);
}