#include "test/jemalloc_test.h"

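/*
 * Use a 1 second decay time so purging can be observed within the test, and
 * minimize lg_tcache_max so that the large size classes used below are not
 * cached by the tcache (small classes still are; see test_decay_ticks).
 */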
const char *malloc_conf = "decay_time:1,lg_tcache_max:0";

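/*
 * Mock replacements for jemalloc's internal clock hooks.  The tests install
 * these in place of nstime_monotonic/nstime_update in order to freeze the
 * clock (test_decay_ticker) or simulate a non-monotonic clock
 * (test_decay_nonmonotonic).
 */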
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;

static unsigned nupdates_mock;
static nstime_t time_mock;
static bool monotonic_mock;

static bool
nstime_monotonic_mock(void)
{
	return (monotonic_mock);
}

static bool
nstime_update_mock(nstime_t *time)
{
	nupdates_mock++;
	if (monotonic_mock)
		nstime_copy(time, &time_mock);
	return (!monotonic_mock);
}

TEST_BEGIN(test_decay_ticks)
{
	ticker_t *decay_ticker;
	unsigned tick0, tick1;
	size_t sz, large0;
	void *p;

	decay_ticker = decay_ticker_get(tsd_fetch(), 0);
	assert_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

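	/* large0 is the smallest large size class. */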
	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/*
	 * Test the standard APIs using a large size class, since we can't
	 * control tcache interactions for small size classes (except by
	 * completely disabling tcache for the entire test program).
	 */

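	/*
	 * Each check reads the decay ticker, performs one operation, and
	 * reads the ticker again; the two values must differ if the
	 * operation ticked the ticker.
	 */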
	/* malloc(). */
	tick0 = ticker_read(decay_ticker);
	p = malloc(large0);
	assert_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
	/* free(). */
	tick0 = ticker_read(decay_ticker);
	free(p);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_read(decay_ticker);
	p = calloc(1, large0);
	assert_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_read(decay_ticker);
	assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), large0);
	assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(NULL, large0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(p, large0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate. */
	tick0 = ticker_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using large and small size classes, with
	 * tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[2];
		allocx_sizes[0] = large0;
		allocx_sizes[1] = 1;

		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);
			/* rallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);
			/* xallocx(). */
			tick0 = ticker_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);
			/* dallocx(). */
			tick0 = ticker_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);
			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size
	 * classes, using an explicit tcache.
	 */
	if (config_tcache) {
		unsigned tcache_ind, i;
		size_t tcache_sizes[2];
		tcache_sizes[0] = large0;
		tcache_sizes[1] = 1;

		sz = sizeof(unsigned);
		assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
		    NULL, 0), 0, "Unexpected mallctl failure");

		for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
			sz = tcache_sizes[i];

			/* tcache fill. */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache fill "
			    "(sz=%zu)", sz);
			/* tcache flush. */
			dallocx(p, MALLOCX_TCACHE(tcache_ind));
			tick0 = ticker_read(decay_ticker);
			assert_d_eq(mallctl("tcache.flush", NULL, NULL,
			    (void *)&tcache_ind, sizeof(unsigned)), 0,
			    "Unexpected mallctl failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache flush "
			    "(sz=%zu)", sz);
		}
	}
}
TEST_END

TEST_BEGIN(test_decay_ticker)
{
#define	NPS 1024
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t epoch;
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large;
	unsigned i, nupdates0;
	nstime_t time, decay_time, deadline;

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate the
	 * objects, restore the clock, then [md]allocx() in a tight loop to
	 * verify the ticker triggers purging.
	 */

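	/*
	 * Choose an allocation size that the tcache will not cache: just
	 * above tcache_max when tcache is enabled, otherwise the smallest
	 * large size class.
	 */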
	if (config_tcache) {
		size_t tcache_max;

		sz = sizeof(size_t);
		assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
		    &sz, NULL, 0), 0, "Unexpected mallctl failure");
		large = nallocx(tcache_max + 1, flags);
	} else {
		sz = sizeof(size_t);
		assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large,
		    &sz, NULL, 0), 0, "Unexpected mallctl failure");
	}

	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
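	/* Refresh cached stats and record the baseline purge count. */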
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

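	/*
	 * Pause the clock: with monotonic_mock set, the mocked
	 * nstime_update() keeps returning the frozen time_mock value, so
	 * decay time does not advance while the objects are deallocated.
	 */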
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = true;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

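	/* Restore the real clock so that decay time can advance again. */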
	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;

	nstime_init(&time, 0);
	nstime_update(&time);
	nstime_init2(&decay_time, opt_decay_time, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_time);
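	/*
	 * Churn small allocations so that the decay ticker fires, until
	 * either purging is observed or the decay deadline (opt_decay_time,
	 * 1 second per malloc_conf) passes.
	 */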
	do {
		for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) {
			void *p = mallocx(1, flags);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			dallocx(p, flags);
		}
		assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
		    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
		sz = sizeof(uint64_t);
		assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1,
		    &sz, NULL, 0), config_stats ? 0 : ENOENT,
		    "Unexpected mallctl result");

		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0);

	if (config_stats)
		assert_u64_gt(npurge1, npurge0, "Expected purging to occur");
#undef NPS
}
TEST_END

TEST_BEGIN(test_decay_nonmonotonic)
{
#define	NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t epoch;
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz,
	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");

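	/*
	 * Install the mock clock with monotonic_mock cleared, so that
	 * nstime_monotonic() reports a non-monotonic clock and the mocked
	 * nstime_update() declines to update the time.
	 */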
	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = false;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(uint64_t)), 0, "Unexpected mallctl failure");
	sz = sizeof(uint64_t);
	assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz,
	    NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result");

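	/* No purging should have occurred given the non-monotonic clock. */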
	if (config_stats)
		assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");

	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END

int
main(void)
{
	return (test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic));
}