#include "test/jemalloc_test.h"
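
/*
 * Exercise heap profile dumping under concurrency and verify that backtrace
 * counts accumulate across dumps.  Requires profiling support (config_prof);
 * the test is skipped otherwise.  The harness is assumed to enable profiling
 * via MALLOC_CONF (e.g. "prof:true"); the exact configuration is set outside
 * this file.
 */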

#define NTHREADS		4
#define NALLOCS_PER_THREAD	50
#define DUMP_INTERVAL		1
#define BT_COUNT_CHECK_INTERVAL	5

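/*
 * Intercept prof_dump_open() so that dump output is discarded rather than
 * written to the filesystem; the test only cares that dumping succeeds.
 */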
static int
prof_dump_open_intercept(bool propagate_err, const char *filename) {
	int fd;

	fd = open("/dev/null", O_WRONLY);
	assert_d_ne(fd, -1, "Unexpected open() failure");

	return fd;
}

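/*
 * Allocate via a backtrace that is unique to (thd_ind, iteration), so that
 * every allocation registers a distinct backtrace with the profiler.
 */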
static void *
alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) {
	return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration);
}

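/*
 * Worker thread: repeatedly allocate/deallocate via unique backtraces,
 * periodically dump the heap profile, and verify that the global backtrace
 * count keeps growing even though every allocation is immediately freed.
 */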
static void *
thd_start(void *varg) {
	unsigned thd_ind = *(unsigned *)varg;
	size_t bt_count_prev, bt_count;
	unsigned i_prev, i;

	i_prev = 0;
	bt_count_prev = 0;
	for (i = 0; i < NALLOCS_PER_THREAD; i++) {
		void *p = alloc_from_permuted_backtrace(thd_ind, i);
		dallocx(p, 0);
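		/* Trigger a heap profile dump every DUMP_INTERVAL iterations. */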
		if (i % DUMP_INTERVAL == 0) {
			assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
			    0, "Unexpected error while dumping heap profile");
		}

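		/*
		 * Every BT_COUNT_CHECK_INTERVAL iterations (and on the final
		 * iteration), verify that the backtrace count has grown by at
		 * least one per iteration since the previous check: each
		 * iteration allocates from a distinct backtrace, and dumping
		 * must not discard the accumulated backtraces.
		 */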
		if (i % BT_COUNT_CHECK_INTERVAL == 0 ||
		    i+1 == NALLOCS_PER_THREAD) {
			bt_count = prof_bt_count();
			assert_zu_le(bt_count_prev+(i-i_prev), bt_count,
			    "Expected larger backtrace count increase");
			i_prev = i;
			bt_count_prev = bt_count;
		}
	}

	return NULL;
}

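/*
 * Spawn NTHREADS workers that concurrently allocate, dump heap profiles, and
 * verify backtrace-count accumulation.
 */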
TEST_BEGIN(test_idump) {
	bool active;
	thd_t thds[NTHREADS];
	unsigned thd_args[NTHREADS];
	unsigned i;

	test_skip_if(!config_prof);

	active = true;
	assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active,
	    sizeof(active)), 0,
	    "Unexpected mallctl failure while activating profiling");

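	/* Discard dump output rather than writing files during the test. */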
	prof_dump_open = prof_dump_open_intercept;

	for (i = 0; i < NTHREADS; i++) {
		thd_args[i] = i;
		thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
	}
	for (i = 0; i < NTHREADS; i++) {
		thd_join(thds[i], NULL);
	}
}
TEST_END

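/*
 * Run without the harness's reentrancy wrapper, presumably because reentrant
 * allocations would perturb the backtrace counts this test asserts on.
 */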
int
main(void) {
	return test_no_reentrancy(
	    test_idump);
}