#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/sc.h"

typedef enum {
	extent_state_active   = 0,
	extent_state_dirty    = 1,
	extent_state_muzzy    = 2,
	extent_state_retained = 3
} extent_state_t;
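
/*
 * Note (descriptive, not normative): an extent is typically "active" while it
 * backs live allocations; on deallocation it moves to "dirty", then to
 * "muzzy" once its pages have been lazily purged, and finally to "retained"
 * once the pages are fully purged but the virtual memory is kept for reuse.
 */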

/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * d: dumpable
	 * z: zeroed
	 * t: state
	 * i: szind
	 * f: nfree
	 * s: bin_shard
	 * n: sn
	 *
	 * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up via
	 *       iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * dumpable: The dumpable flag indicates whether or not we've set the
	 *           memory in question to be dumpable.  Note that this
	 *           interacts somewhat subtly with user-specified extent hooks,
	 *           since we don't know if *they* are fiddling with
	 *           dumpability (in which case, we don't want to undo whatever
	 *           they're doing).  To deal with this scenario, we:
	 *             - Make dumpable false only for memory allocated with the
	 *               default hooks.
	 *             - Only allow memory to go from non-dumpable to dumpable,
	 *               and only once.
	 *             - Never make the OS call to allow dumping when the
	 *               dumpable bit is already set.
	 *           These three constraints mean that we will never
	 *           accidentally dump user memory that the user meant to set
	 *           nondumpable with their extent hooks.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * state: The state field holds an extent_state_t value.
	 *
	 * szind: The szind field indicates the usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to sz_large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in the slab.
	 *
	 * bin_shard: The shard of the bin from which this extent came.
	 *
	 * is_head: Whether this extent is the head of the mapping from which it
	 *          was originally carved; consulted when deciding whether
	 *          neighboring extents may be merged.  (Not shown in the bit
	 *          diagram above; see the EXTENT_BITS_IS_HEAD_* definitions
	 *          below.)
	 *
	 * sn: Serial number (potentially non-unique).
	 *
	 *     Serial numbers may wrap around if !opt_retain, but as long as
	 *     comparison functions fall back on address comparison for equal
	 *     serial numbers, stable (if imperfect) ordering is maintained.
	 *
	 *     Serial numbers may not be unique even in the absence of
	 *     wrap-around, e.g. when splitting an extent and assigning the same
	 *     serial number to both resulting adjacent extents.
	 */
	uint64_t		e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
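
	/*
	 * Illustrative only (not part of the implementation): MASK(width,
	 * shift) yields a run of width one bits starting at bit shift, e.g.
	 * MASK(2, 3) == ((((uint64_t)1 << 2) - 1) << 3) == 0x18.
	 */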

#define EXTENT_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT  0
#define EXTENT_BITS_ARENA_MASK  MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_WIDTH  1
#define EXTENT_BITS_SLAB_SHIFT  (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK  MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_WIDTH  1
#define EXTENT_BITS_COMMITTED_SHIFT  (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK  MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_DUMPABLE_WIDTH  1
#define EXTENT_BITS_DUMPABLE_SHIFT  (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK  MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_ZEROED_WIDTH  1
#define EXTENT_BITS_ZEROED_SHIFT  (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK  MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_STATE_WIDTH  2
#define EXTENT_BITS_STATE_SHIFT  (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK  MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_WIDTH  LG_CEIL(SC_NSIZES)
#define EXTENT_BITS_SZIND_SHIFT  (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK  MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_WIDTH  (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT  (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK  MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_BINSHARD_WIDTH  6
#define EXTENT_BITS_BINSHARD_SHIFT  (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_BINSHARD_MASK  MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)

#define EXTENT_BITS_IS_HEAD_WIDTH  1
#define EXTENT_BITS_IS_HEAD_SHIFT  (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
#define EXTENT_BITS_IS_HEAD_MASK  MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)

#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
#define EXTENT_BITS_SN_MASK  (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
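
	/*
	 * Illustrative only (not part of the implementation): a field is
	 * decoded from e_bits by masking and shifting with the definitions
	 * above, e.g. the arena index would be recovered as
	 *     (unsigned)((e_bits & EXTENT_BITS_ARENA_MASK) >>
	 *         EXTENT_BITS_ARENA_SHIFT)
	 * The accessor functions referred to at the top of this struct perform
	 * the decoding (and the corresponding encoding on writes).
	 */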

	/* Pointer to the extent that this structure is responsible for. */
	void			*e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different from the serial number for the extent at
		 * e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t			e_size_esn;
	#define EXTENT_SIZE_MASK	((size_t)~(PAGE-1))
	#define EXTENT_ESN_MASK		((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t			e_bsize;
	};
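
	/*
	 * Illustrative only (not part of the implementation): the packed size
	 * and esn are recovered from e_size_esn with the masks above, i.e.
	 *     size = e_size_esn & EXTENT_SIZE_MASK  (a multiple of PAGE)
	 *     esn  = e_size_esn & EXTENT_ESN_MASK   (the bits below PAGE)
	 */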

	/*
	 * List linkage, used by a variety of lists:
	 * - bin_t's slabs_full
	 * - extents_t's LRU
	 * - stashed dirty extents
	 * - arena's large allocations
	 */
	ql_elm(extent_t)	ql_link;

	/*
	 * Linkage for per size class sn/address-ordered heaps, and
	 * for extent_avail.
	 */
	phn(extent_t)		ph_link;

	union {
		/* Small region slab metadata. */
		arena_slab_data_t	e_slab_data;

		/* Profiling data, used for large objects. */
		struct {
			/* Time when this was allocated. */
			nstime_t		e_alloc_time;
			/* Points to a prof_tctx_t. */
			atomic_p_t		e_prof_tctx;
		};
	};
};
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
	malloc_mutex_t		mtx;

	/*
	 * Quantized per size class heaps of extents.
	 *
	 * Synchronization: mtx.
	 */
	extent_heap_t		heaps[SC_NPSIZES + 1];
	atomic_zu_t		nextents[SC_NPSIZES + 1];
	atomic_zu_t		nbytes[SC_NPSIZES + 1];

	/*
	 * Bitmap for which set bits correspond to non-empty heaps.
	 *
	 * Synchronization: mtx.
	 */
	bitmap_t		bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];

	/*
	 * LRU of all extents in heaps.
	 *
	 * Synchronization: mtx.
	 */
	extent_list_t		lru;

	/*
	 * Page sum for all extents in heaps.
	 *
	 * The synchronization here is a little tricky.  Modifications to npages
	 * must hold mtx, but reads need not (though, a reader who sees npages
	 * without holding the mutex can't assume anything about the rest of the
	 * state of the extents_t).
	 */
	atomic_zu_t		npages;
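
	/*
	 * Illustrative only (not part of the implementation): such a lock-free
	 * read would look like
	 *     atomic_load_zu(&extents->npages, ATOMIC_RELAXED)
	 * while any modification is made with mtx held.
	 */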

	/* All stored extents must be in the same state. */
	extent_state_t		state;

	/*
	 * If true, delay coalescing until eviction; otherwise coalesce during
	 * deallocation.
	 */
	bool			delay_coalesce;
};

/*
 * The following two structs are for experimental purposes. See
 * experimental_utilization_query_ctl and
 * experimental_utilization_batch_query_ctl in src/ctl.c.
 */

struct extent_util_stats_s {
	size_t nfree;
	size_t nregs;
	size_t size;
};

struct extent_util_stats_verbose_s {
	void *slabcur_addr;
	size_t nfree;
	size_t nregs;
	size_t size;
	size_t bin_nfree;
	size_t bin_nregs;
};

#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */