#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H

#include "jemalloc/internal/arena_stats.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"

struct arena_decay_s {
	/* Synchronizes all non-atomic fields. */
	malloc_mutex_t		mtx;
	/*
	 * True if a thread is currently purging the extents associated with
	 * this decay structure.
	 */
	bool			purging;
	/*
	 * Approximate time in milliseconds from the creation of a set of unused
	 * dirty pages until an equivalent set of unused dirty pages is purged
	 * and/or reused.
	 */
	atomic_zd_t		time_ms;
	/* time / SMOOTHSTEP_NSTEPS. */
	nstime_t		interval;
	/*
	 * Time at which the current decay interval logically started.  We do
	 * not actually advance to a new epoch until sometime after it starts
	 * because of scheduling and computation delays, and it is even possible
	 * to completely skip epochs.  In all cases, during epoch advancement we
	 * merge all relevant activity into the most recently recorded epoch.
	 */
	nstime_t		epoch;
	/* Deadline randomness generator. */
	uint64_t		jitter_state;
	/*
	 * Deadline for current epoch.  This is the sum of interval and
	 * per-epoch jitter, which is a uniform random variable in
	 * [0..interval).  Epochs always advance by precise multiples of
	 * interval, but we randomize the deadline to reduce the likelihood of
	 * arenas purging in lockstep.
	 */
	nstime_t		deadline;
	/*
	 * Number of unpurged pages at beginning of current epoch.  During epoch
	 * advancement we use the delta between arena->decay_*.nunpurged and
	 * extents_npages_get(&arena->extents_*) to determine how many dirty
	 * pages, if any, were generated.
	 */
	size_t			nunpurged;
	/*
	 * Trailing log of how many unused dirty pages were generated during
	 * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
	 * element is the most recent epoch.  Corresponding epoch times are
	 * relative to epoch.  See the rotation sketch following this struct.
	 */
	size_t			backlog[SMOOTHSTEP_NSTEPS];

	/*
	 * Pointer to associated stats.  These stats are embedded directly in
	 * the arena's stats due to how stats structures are shared between the
	 * arena and ctl code.
	 *
	 * Synchronization: Same as associated arena's stats field.
	 */
	arena_stats_decay_t	*stats;
	/* Peak number of pages in associated extents.  Used for debug only. */
	uint64_t		ceil_npages;
};
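
/*
 * Illustrative sketch (hypothetical helper, not jemalloc's actual purging
 * code, which lives in arena.c): how the backlog above might be rotated once
 * nadvance epochs have elapsed.  Expired epochs shift out of the window,
 * epochs that were skipped entirely contribute zero dirty pages, and the
 * newest slot records the pages dirtied during the most recent epoch.
 */
static inline void
example_decay_backlog_rotate(size_t *backlog, uint64_t nadvance,
    size_t ndirty_new) {
	size_t n = (nadvance >= SMOOTHSTEP_NSTEPS) ? SMOOTHSTEP_NSTEPS :
	    (size_t)nadvance;
	size_t i;

	/* Shift out epochs that have aged past the smoothstep window. */
	for (i = 0; i < SMOOTHSTEP_NSTEPS - n; i++) {
		backlog[i] = backlog[i + n];
	}
	/* Epochs that were skipped entirely generated no dirty pages. */
	for (; i < SMOOTHSTEP_NSTEPS - 1; i++) {
		backlog[i] = 0;
	}
	/* The newest slot records pages dirtied during the latest epoch. */
	backlog[SMOOTHSTEP_NSTEPS - 1] = ndirty_new;
}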

struct arena_s {
	/*
	 * Number of threads currently assigned to this arena.  Each thread has
	 * two distinct assignments, one for application-serving allocation, and
	 * the other for internal metadata allocation.  Internal metadata must
	 * not be allocated from arenas explicitly created via the arenas.create
	 * mallctl, because the arena.<i>.reset mallctl indiscriminately
	 * discards all allocations for the affected arena.
	 *
	 *   0: Application allocation.
	 *   1: Internal metadata allocation.
	 *
	 * See the increment sketch following this struct.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		nthreads[2];

	/*
	 * When percpu_arena is enabled, to amortize the cost of reading /
	 * updating the current CPU id, track the most recent thread accessing
	 * this arena, and only read CPU if there is a mismatch.
	 */
	tsdn_t		*last_thd;

	/* Synchronization: internal. */
	arena_stats_t		stats;

	/*
	 * Lists of tcaches and cache_bin_array_descriptors for extant threads
	 * associated with this arena.  Stats from these are merged
	 * incrementally, and at exit if opt_stats_print is enabled.
	 *
	 * Synchronization: tcache_ql_mtx.
	 */
	ql_head(tcache_t)			tcache_ql;
	ql_head(cache_bin_array_descriptor_t)	cache_bin_array_descriptor_ql;
	malloc_mutex_t				tcache_ql_mtx;

	/* Synchronization: internal. */
	prof_accum_t		prof_accum;
	uint64_t		prof_accumbytes;

	/*
	 * PRNG state for cache index randomization of large allocation base
	 * pointers.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		offset_state;

	/*
	 * Extent serial number generator state.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		extent_sn_next;

	/*
	 * Represents a dss_prec_t, but atomically.
	 *
	 * Synchronization: atomic.
	 */
	atomic_u_t		dss_prec;

	/*
	 * Number of pages in active extents.
	 *
	 * Synchronization: atomic.
	 */
	atomic_zu_t		nactive;

	/*
	 * Extant large allocations.
	 *
	 * Synchronization: large_mtx.
	 */
	extent_list_t		large;
	/* Synchronizes all large allocation/update/deallocation. */
	malloc_mutex_t		large_mtx;

	/*
	 * Collections of extents that were previously allocated.  These are
	 * used when allocating extents, in an attempt to re-use address space.
	 *
	 * Synchronization: internal.
	 */
	extents_t		extents_dirty;
	extents_t		extents_muzzy;
	extents_t		extents_retained;

	/*
	 * Decay-based purging state, responsible for scheduling extent state
	 * transitions.
	 *
	 * Synchronization: internal.
	 */
	arena_decay_t		decay_dirty; /* dirty --> muzzy */
	arena_decay_t		decay_muzzy; /* muzzy --> retained */

	/*
	 * Next extent size class in a growing series to use when satisfying a
	 * request via the extent hooks (only if opt_retain).  This limits the
	 * number of disjoint virtual memory ranges so that extent merging can
	 * be effective even if multiple arenas' extent allocation requests are
	 * highly interleaved.
	 *
	 * retain_grow_limit is the max allowed size ind to expand to (unless
	 * the requested size is greater).  The default is no limit, and it can
	 * be changed only through mallctl.
	 *
	 * Synchronization: extent_grow_mtx.
	 */
	pszind_t		extent_grow_next;
	pszind_t		retain_grow_limit;
	malloc_mutex_t		extent_grow_mtx;

	/*
	 * Available extent structures that were allocated via
	 * base_alloc_extent().
	 *
	 * Synchronization: extent_avail_mtx.
	 */
	extent_tree_t		extent_avail;
	malloc_mutex_t		extent_avail_mtx;

	/*
	 * bins is used to store heaps of free regions.
	 *
	 * Synchronization: internal.
	 */
	bin_t			bins[NBINS];

	/*
	 * Base allocator, from which arena metadata are allocated.
	 *
	 * Synchronization: internal.
	 */
	base_t			*base;
	/* Used to determine uptime.  Read-only after initialization. */
	nstime_t		create_time;
};
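
/*
 * Illustrative sketch (hypothetical helper; the actual thread-assignment
 * bookkeeping lives in arena.c): recording a thread assignment via the
 * atomic nthreads counters above, where internal selects between the
 * application (0) and internal-metadata (1) slots.
 */
static inline void
example_arena_nthreads_inc(arena_t *arena, bool internal) {
	/* Relaxed ordering suffices; the counters are simple statistics. */
	atomic_fetch_add_u(&arena->nthreads[internal ? 1 : 0], 1,
	    ATOMIC_RELAXED);
}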

/* Used in conjunction with tsd for fast arena-related context lookup. */
struct arena_tdata_s {
	ticker_t		decay_ticker;
};

/* Used to pass rtree lookup context down the path. */
struct alloc_ctx_s {
	szind_t szind;
	bool slab;
};
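
/*
 * Illustrative sketch (hypothetical helper, not part of the deallocation
 * path): a caller holding an alloc_ctx_s filled in by an earlier rtree
 * lookup can branch on it without touching extent metadata again.  In this
 * scheme, slab-backed allocations are exactly the small size classes, so
 * szind can then index arena->bins.
 */
static inline bool
example_alloc_ctx_is_small(const struct alloc_ctx_s *ctx) {
	return ctx->slab && ctx->szind < NBINS;
}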

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */