/*
 * Copyright 2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


/*!	A simple allocator that works directly on an area, based on the boot
	loader's heap. See there for more information about its inner workings.
*/
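
/*	Rough usage sketch (illustrative only; the pool name and the sizes below
	are made up):

		rtm_pool* pool;
		if (rtm_create_pool(&pool, 64 * 1024, "my pool") == B_OK) {
			void* buffer = rtm_alloc(pool, 256);
			if (buffer != NULL) {
				// ... use the buffer ...
				rtm_free(buffer);
			}
			rtm_delete_pool(pool);
		}
*/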


#include <RealtimeAlloc.h>

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <OS.h>

#include <locks.h>
#include <kernel/util/DoublyLinkedList.h>


//#define TRACE_RTM
#ifdef TRACE_RTM
#	define TRACE(x...) printf(x);
#else
#	define TRACE(x...) ;
#endif


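// Each chunk starts with a size_t header holding its complete size (fSize).
// While a chunk is free, the pointer-sized field that follows (fNext) links it
// into the pool's free list; once allocated, the caller's data starts right
// there, FreeChunk::NextOffset() bytes into the chunk. A chunk's usable size
// is therefore its complete size minus sizeof(size_t).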
class FreeChunk {
public:
			void				SetTo(size_t size, FreeChunk* next);

			uint32				Size() const;
			uint32				CompleteSize() const { return fSize; }

			FreeChunk*			Next() const { return fNext; }
			void				SetNext(FreeChunk* next) { fNext = next; }

			FreeChunk*			Split(uint32 splitSize);
			bool				IsTouching(FreeChunk* link);
			FreeChunk*			Join(FreeChunk* link);
			void				Remove(rtm_pool* pool,
									FreeChunk* previous = NULL);
			void				Enqueue(rtm_pool* pool);

			void*				AllocatedAddress() const;
	static	FreeChunk*			SetToAllocated(void* allocated);
	static	addr_t				NextOffset() { return sizeof(size_t); }

private:
			size_t				fSize;
			FreeChunk*			fNext;
};


struct rtm_pool : DoublyLinkedListLinkImpl<rtm_pool> {
	area_id		area;
	void*		heap_base;
	size_t		max_size;
	size_t		available;
	FreeChunk	free_anchor;
	mutex		lock;

	bool Contains(void* buffer) const;
	void Free(void* buffer);
};

typedef DoublyLinkedList<rtm_pool> PoolList;


const static uint32 kAlignment = 256;
	// all memory chunks will be a multiple of this

static mutex sPoolsLock = MUTEX_INITIALIZER("rtm pools");
static PoolList sPools;


void
FreeChunk::SetTo(size_t size, FreeChunk* next)
{
	fSize = size;
	fNext = next;
}


/*!	Returns the number of bytes that can be allocated
	in this chunk.
*/
uint32
FreeChunk::Size() const
{
	return fSize - FreeChunk::NextOffset();
}


/*!	Splits off the upper part of this chunk at the requested
	size and returns it.
*/
FreeChunk*
FreeChunk::Split(uint32 splitSize)
{
	splitSize = (splitSize - 1 + kAlignment) & ~(kAlignment - 1);

	FreeChunk* chunk
		= (FreeChunk*)((uint8*)this + FreeChunk::NextOffset() + splitSize);
	chunk->fSize = fSize - splitSize - FreeChunk::NextOffset();
	chunk->fNext = fNext;

	fSize = splitSize + FreeChunk::NextOffset();

	return chunk;
}


/*!	Checks whether the specified chunk is adjacent to this chunk,
	so that the two could be joined.
*/
bool
FreeChunk::IsTouching(FreeChunk* chunk)
{
	return chunk
		&& (((uint8*)this + fSize == (uint8*)chunk)
			|| (uint8*)chunk + chunk->fSize == (uint8*)this);
}


/*!	Joins the chunk to this chunk and returns a pointer to
	the resulting chunk - which will be one of the two
	original chunks.
	Note that the chunks must be joinable, or else this method
	doesn't work correctly. Use FreeChunk::IsTouching()
	to check whether this method can be applied.
*/
FreeChunk*
FreeChunk::Join(FreeChunk* chunk)
{
	if (chunk < this) {
		chunk->fSize += fSize;
		chunk->fNext = fNext;

		return chunk;
	}

	fSize += chunk->fSize;
	fNext = chunk->fNext;

	return this;
}


void
FreeChunk::Remove(rtm_pool* pool, FreeChunk* previous)
{
	if (previous == NULL) {
		// find the previous chunk in the list
		FreeChunk* chunk = pool->free_anchor.fNext;

		while (chunk != NULL && chunk != this) {
			previous = chunk;
			chunk = chunk->fNext;
		}

		if (chunk == NULL)
			return;
	}

	previous->fNext = fNext;
	fNext = NULL;
}


void
FreeChunk::Enqueue(rtm_pool* pool)
{
	FreeChunk* chunk = pool->free_anchor.fNext;
	FreeChunk* last = &pool->free_anchor;
	while (chunk && chunk->Size() < fSize) {
		last = chunk;
		chunk = chunk->fNext;
	}

	fNext = chunk;
	last->fNext = this;
}


void*
FreeChunk::AllocatedAddress() const
{
	return (void*)&fNext;
}


FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
	return (FreeChunk*)((addr_t)allocated - FreeChunk::NextOffset());
}


// #pragma mark - rtm_pool


bool
rtm_pool::Contains(void* buffer) const
{
	return (addr_t)heap_base <= (addr_t)buffer
		&& (addr_t)heap_base - 1 + max_size >= (addr_t)buffer;
}


void
rtm_pool::Free(void* allocated)
{
	FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);
	available += freedChunk->CompleteSize();

	// try to join the new free chunk with an existing one;
	// it may be joined with up to two chunks

	FreeChunk* chunk = free_anchor.Next();
	FreeChunk* last = &free_anchor;
	int32 joinCount = 0;

	while (chunk) {
		if (chunk->IsTouching(freedChunk)) {
			// partially "insert" it into the list before joining,
			// because the joined chunk inherits the next pointer
			freedChunk->SetNext(chunk->Next());
			freedChunk = chunk->Join(freedChunk);

			// remove the joined chunk from the list
			last->SetNext(freedChunk->Next());
			chunk = last;

			if (++joinCount == 2)
				break;
		}

		last = chunk;
		chunk = chunk->Next();
	}

	// enqueue the chunk at the right position; the
	// free list is ordered by size

	freedChunk->Enqueue(this);
}


// #pragma mark -


static rtm_pool*
pool_for(void* buffer)
{
	MutexLocker _(&sPoolsLock);

	PoolList::Iterator iterator = sPools.GetIterator();
	while (rtm_pool* pool = iterator.Next()) {
		if (pool->Contains(buffer))
			return pool;
	}

	return NULL;
}


// #pragma mark - public API


status_t
rtm_create_pool(rtm_pool** _pool, size_t totalSize, const char* name)
{
	rtm_pool* pool = (rtm_pool*)malloc(sizeof(rtm_pool));
	if (pool == NULL)
		return B_NO_MEMORY;

	if (name != NULL)
		mutex_init_etc(&pool->lock, name, MUTEX_FLAG_CLONE_NAME);
	else
		mutex_init(&pool->lock, "realtime pool");

	// Allocate enough space for at least one allocation over \a totalSize
	pool->max_size = (totalSize + sizeof(FreeChunk) - 1 + B_PAGE_SIZE)
		& ~(B_PAGE_SIZE - 1);
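		// For example, assuming a typical 4 KiB B_PAGE_SIZE: a 64 KiB request
		// grows by sizeof(FreeChunk) and is then rounded up to the next page
		// multiple, i.e. 68 KiB.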

	area_id area = create_area(name, &pool->heap_base, B_ANY_ADDRESS,
		pool->max_size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		mutex_destroy(&pool->lock);
		free(pool);
		return area;
	}

	pool->area = area;
	pool->available = pool->max_size - FreeChunk::NextOffset();

	// declare the whole heap as one chunk, and add it
	// to the free list

	FreeChunk* chunk = (FreeChunk*)pool->heap_base;
	chunk->SetTo(pool->max_size, NULL);

	pool->free_anchor.SetTo(0, chunk);

	*_pool = pool;

	MutexLocker _(&sPoolsLock);
	sPools.Add(pool);
	return B_OK;
}


status_t
rtm_delete_pool(rtm_pool* pool)
{
	if (pool == NULL)
		return B_BAD_VALUE;

	mutex_lock(&pool->lock);

	{
		MutexLocker _(&sPoolsLock);
		sPools.Remove(pool);
	}

	delete_area(pool->area);
	mutex_destroy(&pool->lock);
	free(pool);

	return B_OK;
}


void*
rtm_alloc(rtm_pool* pool, size_t size)
{
	if (pool == NULL)
		return malloc(size);

	if (pool->heap_base == NULL || size == 0)
		return NULL;

	MutexLocker _(&pool->lock);

	// align the size requirement to a kAlignment bytes boundary
	size = (size - 1 + kAlignment) & ~(size_t)(kAlignment - 1);
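		// e.g. with kAlignment == 256, a 1 byte request becomes 256 bytes
		// and a 257 byte request becomes 512 bytes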

	if (size > pool->available) {
		TRACE("malloc(): Out of memory!\n");
		return NULL;
	}

	FreeChunk* chunk = pool->free_anchor.Next();
	FreeChunk* last = &pool->free_anchor;
	while (chunk && chunk->Size() < size) {
		last = chunk;
		chunk = chunk->Next();
	}

	if (chunk == NULL) {
		// could not find a free chunk as large as needed
		TRACE("malloc(): Out of memory!\n");
		return NULL;
	}

	if (chunk->Size() > size + sizeof(FreeChunk) + kAlignment) {
		// if this chunk is bigger than the requested size,
		// we split it to form two chunks (with a minimal
		// size of kAlignment allocatable bytes).

		FreeChunk* freeChunk = chunk->Split(size);
		last->SetNext(freeChunk);

		// re-enqueue the free chunk at the correct position
		freeChunk->Remove(pool, last);
		freeChunk->Enqueue(pool);
	} else {
		// remove the chunk from the free list

		last->SetNext(chunk->Next());
	}

	pool->available -= size + sizeof(size_t);
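		// the extra sizeof(size_t) accounts for the chunk's size header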

	TRACE("malloc(%lu) -> %p\n", size, chunk->AllocatedAddress());
	return chunk->AllocatedAddress();
}


status_t
rtm_free(void* allocated)
{
	if (allocated == NULL)
		return B_OK;

	TRACE("rtm_free(%p)\n", allocated);

	// find pool
	rtm_pool* pool = pool_for(allocated);
	if (pool == NULL) {
		free(allocated);
		return B_OK;
	}

	MutexLocker _(&pool->lock);
	pool->Free(allocated);
	return B_OK;
}


status_t
rtm_realloc(void** _buffer, size_t newSize)
{
	if (_buffer == NULL)
		return B_BAD_VALUE;

	TRACE("rtm_realloc(%p, %lu)\n", *_buffer, newSize);

	void* oldBuffer = *_buffer;

	// find pool
	rtm_pool* pool = pool_for(oldBuffer);
	if (pool == NULL) {
		void* buffer = realloc(oldBuffer, newSize);
		if (buffer != NULL) {
			*_buffer = buffer;
			return B_OK;
		}
		return B_NO_MEMORY;
	}

	MutexLocker _(&pool->lock);

	if (newSize == 0) {
		TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize);
		pool->Free(oldBuffer);
		*_buffer = NULL;
		return B_OK;
	}

	size_t copySize = newSize;
	if (oldBuffer != NULL) {
		FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);

		// Check if the old buffer still fits, and if it makes sense to keep it
		if (oldChunk->Size() >= newSize && newSize > oldChunk->Size() / 3) {
			TRACE("realloc(%p, %lu) old buffer is large enough\n",
				oldBuffer, newSize);
			return B_OK;
		}

		if (copySize > oldChunk->Size())
			copySize = oldChunk->Size();
	}

	void* newBuffer = rtm_alloc(pool, newSize);
	if (newBuffer == NULL)
		return B_NO_MEMORY;

	if (oldBuffer != NULL) {
		memcpy(newBuffer, oldBuffer, copySize);
		pool->Free(oldBuffer);
	}

	TRACE("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer);
	*_buffer = newBuffer;
	return B_OK;
}


status_t
rtm_size_for(void* buffer)
{
	if (buffer == NULL)
		return 0;

	FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
	// TODO: we currently always return the actual chunk size, not the
	// allocated one
	return chunk->Size();
}


status_t
rtm_phys_size_for(void* buffer)
{
	if (buffer == NULL)
		return 0;

	FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
	return chunk->Size();
}


size_t
rtm_available(rtm_pool* pool)
{
	if (pool == NULL) {
		// whatever - might want to use system_info instead
		return 1024 * 1024;
	}

	return pool->available;
}


rtm_pool*
rtm_default_pool()
{
	// We always return NULL - the default pool will just use malloc()/free()
	return NULL;
}


#if 0
extern "C" {

// undocumented symbols that BeOS exports
status_t rtm_create_pool_etc(rtm_pool ** out_pool, size_t total_size, const char * name, int32 param4, int32 param5, ...);
void rtm_get_pool(rtm_pool *pool,void *data,int32 param3,int32 param4, ...);

}
#endif