/*
 * Copyright 2003-2013, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


#include "runtime_loader_private.h"

#include <syscalls.h>

#ifdef HEAP_TEST
#	include <stdio.h>
#endif
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include <util/SplayTree.h>

#include <locks.h>


/*!	This is a very simple malloc()/free() implementation - it only
	manages free chunks.
	After heap_init() is called, all free memory is contained in one
	big chunk, the only entry in the free chunk tree (a splay tree
	ordered by chunk size).
	When memory is allocated, the smallest free chunk that contains
	the requested size is split (or taken as a whole if it can't be
	split any further), and its lower half is removed from the
	free tree.
	When a chunk is freed, it is joined with its predecessor or
	successor, if possible, before being reinserted into the tree.
*/
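
// For illustration: malloc(20) first rounds the request up to a multiple of
// kAlignment (and to at least sizeof(FreeChunkData)), then takes the
// smallest free chunk that is large enough, splitting off and reinserting
// the upper remainder if it can still form a FreeChunk of its own.
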
#if __cplusplus >= 201103L
#include <cstddef>
const static size_t kAlignment = alignof(max_align_t);
#else
const static size_t kAlignment = 8;
#endif
	// all memory chunks will be a multiple of this

const static size_t kInitialHeapSize = 64 * 1024;
const static size_t kHeapGrowthAlignment = 32 * 1024;

static const char* const kLockName = "runtime_loader heap";
static recursive_lock sLock = RECURSIVE_LOCK_INITIALIZER(kLockName);


class Chunk {
public:
	size_t CompleteSize() const
	{
		return fSize;
	}

protected:
	// The union pads the header to kAlignment bytes, so that the allocated
	// memory following it stays kAlignment-aligned.
	union {
		size_t	fSize;
		char	fAlignment[kAlignment];
	};
};


class FreeChunk;


struct FreeChunkData : SplayTreeLink<FreeChunk> {

	FreeChunk* Next() const
	{
		return fNext;
	}

	FreeChunk** NextLink()
	{
		return &fNext;
	}

protected:
	FreeChunk*	fNext;
};


class FreeChunk : public Chunk, public FreeChunkData {
public:
			void				SetTo(size_t size);

			size_t				Size() const;

			FreeChunk*			Split(size_t splitSize);
			bool				IsTouching(FreeChunk* link);
			FreeChunk*			Join(FreeChunk* link);

			void*				AllocatedAddress() const;
	static	FreeChunk*			SetToAllocated(void* allocated);
};


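/*!	Key for looking up FreeChunk objects in the free chunk tree. Chunks are
	ordered by size first and by address second; a key constructed from a
	size alone (fChunk == NULL) sorts before every chunk of that size, so a
	best-fit lookup yields the smallest chunk that is large enough.
*/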
struct FreeChunkKey {
	FreeChunkKey(size_t size)
		:
		fSize(size),
		fChunk(NULL)
	{
	}

	FreeChunkKey(const FreeChunk* chunk)
		:
		fSize(chunk->Size()),
		fChunk(chunk)
	{
	}

	int Compare(const FreeChunk* chunk) const
	{
		size_t chunkSize = chunk->Size();
		if (chunkSize != fSize)
			return fSize < chunkSize ? -1 : 1;

		if (fChunk == chunk)
			return 0;
		return fChunk < chunk ? -1 : 1;
	}

private:
	size_t				fSize;
	const FreeChunk*	fChunk;
};


struct FreeChunkTreeDefinition {
	typedef FreeChunkKey	KeyType;
	typedef FreeChunk		NodeType;

	static FreeChunkKey GetKey(const FreeChunk* node)
	{
		return FreeChunkKey(node);
	}

	static SplayTreeLink<FreeChunk>* GetLink(FreeChunk* node)
	{
		return node;
	}

	static int Compare(const FreeChunkKey& key, const FreeChunk* node)
	{
		return key.Compare(node);
	}

	static FreeChunk** GetListLink(FreeChunk* node)
	{
		return node->NextLink();
	}
};


typedef IteratableSplayTree<FreeChunkTreeDefinition> FreeChunkTree;


static size_t sAvailable;
static FreeChunkTree sFreeChunkTree;


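/*!	Rounds up the given size to a multiple of the given alignment; for
	example, with kAlignment == 8, align(13) yields 16. The bit trick used
	here only works for power-of-two alignments.
*/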
static inline size_t
align(size_t size, size_t alignment = kAlignment)
{
	return (size + alignment - 1) & ~(alignment - 1);
}


void
FreeChunk::SetTo(size_t size)
{
	fSize = size;
	fNext = NULL;
}


/*!	Returns the number of bytes that can be allocated
	in this chunk.
*/
size_t
FreeChunk::Size() const
{
	return (addr_t)this + fSize - (addr_t)AllocatedAddress();
}


/*!	Splits the upper part off at the requested size and returns it. This
	chunk will no longer be a valid FreeChunk object; only its fSize will
	be valid.
*/
FreeChunk*
FreeChunk::Split(size_t splitSize)
{
	splitSize = align(splitSize);

	FreeChunk* chunk = (FreeChunk*)((addr_t)AllocatedAddress() + splitSize);
	size_t newSize = (addr_t)chunk - (addr_t)this;
	chunk->fSize = fSize - newSize;
	chunk->fNext = NULL;

	fSize = newSize;

	return chunk;
}


/*!	Checks whether the specified chunk touches this chunk, so
	that they could be joined.
*/
bool
FreeChunk::IsTouching(FreeChunk* chunk)
{
	return chunk
		&& (((uint8*)this + fSize == (uint8*)chunk)
			|| (uint8*)chunk + chunk->fSize == (uint8*)this);
}


/*!	Joins the given chunk with this chunk and returns a pointer
	to the joined chunk - which will be one of the two original
	chunks.
	Note that the chunks must be adjacent, or else this method
	doesn't work correctly. Use FreeChunk::IsTouching()
	to check whether this method can be applied.
*/
FreeChunk*
FreeChunk::Join(FreeChunk* chunk)
{
	if (chunk < this) {
		chunk->fSize += fSize;
		chunk->fNext = fNext;

		return chunk;
	}

	fSize += chunk->fSize;
	fNext = chunk->fNext;

	return this;
}


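/*!	Returns the address of the allocatable memory within this chunk, i.e.
	the first byte after the (padded) Chunk header. SetToAllocated() is the
	inverse mapping, from an allocated address back to its chunk.
*/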
void*
FreeChunk::AllocatedAddress() const
{
	return (void*)static_cast<const FreeChunkData*>(this);
}


FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
	return static_cast<FreeChunk*>((FreeChunkData*)allocated);
}


//	#pragma mark -


static status_t
add_area(size_t size)
{
	void* base;
	area_id area = _kern_create_area("rld heap", &base,
		B_RANDOMIZED_ANY_ADDRESS, size, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < 0)
		return area;

	// declare the whole area as one chunk, and add it to the free tree
	FreeChunk* chunk = (FreeChunk*)base;
	chunk->SetTo(size);
	sFreeChunkTree.Insert(chunk);

	sAvailable += chunk->Size();
	return B_OK;
}


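/*!	Enlarges the heap by adding a new area large enough to hold a chunk
	header plus the requested number of bytes, rounded up to a multiple of
	kHeapGrowthAlignment.
*/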
static status_t
grow_heap(size_t bytes)
{
	return add_area(align(align(sizeof(Chunk)) + bytes, kHeapGrowthAlignment));
}


//	#pragma mark -


status_t
heap_init()
{
	return add_area(kInitialHeapSize);
}


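/*!	Called in the child process after fork() to put the heap lock back into
	a defined state, since it may have been held by another thread in the
	parent at the time of the fork.
*/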
status_t
heap_reinit_after_fork()
{
	recursive_lock_init(&sLock, kLockName);
	return B_OK;
}


#ifdef HEAP_TEST
void
dump_chunks(void)
{
	FreeChunk* chunk = sFreeChunkTree.FindMin();
	while (chunk != NULL) {
		printf("\t%p: chunk size = %zu, end = %p, next = %p\n", chunk,
			chunk->Size(), (uint8*)chunk + chunk->CompleteSize(),
			chunk->Next());
		chunk = chunk->Next();
	}
}
#endif
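
// A minimal way to exercise the allocator when built with HEAP_TEST defined
// (a sketch; assumes a standalone test driver calls these):
//	heap_init();
//	void* buffer = malloc(100);
//	dump_chunks();
//	free(buffer);
//	dump_chunks();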


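/*!	Allocates the requested number of bytes using a best-fit strategy: the
	smallest free chunk that can hold the request is taken from the tree,
	and its upper remainder is split off and reinserted if it is still
	large enough to form a chunk of its own.
*/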
void*
malloc(size_t size)
{
	if (size == 0)
		return NULL;

	RecursiveLocker _(sLock);

	// round the size requirement up to a multiple of kAlignment, and make
	// sure it is large enough to hold the free chunk bookkeeping later on
	if (size < sizeof(FreeChunkData))
		size = sizeof(FreeChunkData);
	size = align(size);

	if (size > sAvailable) {
		// try to enlarge heap
		if (grow_heap(size) != B_OK)
			return NULL;
	}

	// find the smallest free chunk that can hold the request
	FreeChunkKey key(size);
	FreeChunk* chunk = sFreeChunkTree.FindClosest(key, true, true);
	if (chunk == NULL) {
		// could not find a free chunk as large as needed
		if (grow_heap(size) != B_OK)
			return NULL;

		chunk = sFreeChunkTree.FindClosest(key, true, true);
		if (chunk == NULL) {
			TRACE(("no allocation chunk found after growing the heap\n"));
			return NULL;
		}
	}

	sFreeChunkTree.Remove(chunk);
	sAvailable -= chunk->Size();

	void* allocatedAddress = chunk->AllocatedAddress();

	// If this chunk is bigger than the requested size and there's enough
	// space left over for a new chunk, we split it.
	if (chunk->Size() >= size + align(sizeof(FreeChunk))) {
		FreeChunk* freeChunk = chunk->Split(size);
		sFreeChunkTree.Insert(freeChunk);
		sAvailable += freeChunk->Size();
	}

	TRACE(("malloc(%lu) -> %p\n", size, allocatedAddress));
	return allocatedAddress;
}


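/*!	Resizes the given allocation. Shrinking reuses the old buffer whenever
	copying wouldn't pay off; otherwise a new buffer is allocated and the
	contents are copied over.
*/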
void*
realloc(void* oldBuffer, size_t newSize)
{
	if (newSize == 0) {
		TRACE(("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize));
		free(oldBuffer);
		return NULL;
	}

	RecursiveLocker _(sLock);

	size_t oldSize = 0;
	if (oldBuffer != NULL) {
		FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);
		oldSize = oldChunk->Size();

		// Check if the old buffer still fits, and if it makes sense to keep
		// it: small buffers are kept as they are, larger ones only if the
		// new size is more than a third of the old one.
		if (oldSize >= newSize
			&& (oldSize < 128 || newSize > oldSize / 3)) {
			TRACE(("realloc(%p, %lu) old buffer is large enough\n",
				oldBuffer, newSize));
			return oldBuffer;
		}
	}

	void* newBuffer = malloc(newSize);
	if (newBuffer == NULL)
		return NULL;

	if (oldBuffer != NULL) {
		memcpy(newBuffer, oldBuffer, std::min(oldSize, newSize));
		free(oldBuffer);
	}

	TRACE(("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer));
	return newBuffer;
}


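/*!	Returns an allocation to the heap. The freed chunk is joined with up to
	two touching free chunks - its neighbors in memory - before being
	reinserted into the tree; finding them requires a linear scan over all
	free chunks.
*/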
void
free(void* allocated)
{
	if (allocated == NULL)
		return;

	RecursiveLocker _(sLock);

	TRACE(("free(%p)\n", allocated));

	FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);

	// Try to join the new free chunk with an existing one; it may be joined
	// with up to two chunks, one preceding and one following it in memory.

	FreeChunk* chunk = sFreeChunkTree.FindMin();
	int32 joinCount = 0;

	while (chunk) {
		FreeChunk* nextChunk = chunk->Next();

		if (chunk->IsTouching(freedChunk)) {
			sFreeChunkTree.Remove(chunk);
			sAvailable -= chunk->Size();

			freedChunk = chunk->Join(freedChunk);

			if (++joinCount == 2)
				break;
		}

		chunk = nextChunk;
	}

	sFreeChunkTree.Insert(freedChunk);
	sAvailable += freedChunk->Size();
}