/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include "slab_private.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <debug.h>
#include <heap.h>
#include <kernel.h>	// for ROUNDUP
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "ObjectCache.h"
#include "MemoryManager.h"


#define DEBUG_ALLOCATOR
//#define TEST_ALL_CACHES_DURING_BOOT

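// The sizes of the block caches for small allocations. The granularity grows
// with the size class (8 byte steps up to 32, 16 up to 128, 32 up to 256,
// ..., 512 up to 8192), mirroring size_to_index() below. The list is
// 0-terminated; kNumBlockSizes counts only the actual sizes.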
static const size_t kBlockSizes[] = {
	16, 24, 32, 48, 64, 80, 96, 112,
	128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1280, 1536, 1792,
	2048, 2560, 3072, 3584, 4096, 4608, 5120, 5632,
	6144, 6656, 7168, 7680, 8192,
	0
};

static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;

static object_cache* sBlockCaches[kNumBlockSizes];

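// Raw memory used by block_alloc_early() for small allocations made before
// the block caches have been created. Anything allocated from this window
// can never be freed.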
static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;


RANGE_MARKER_FUNCTION_BEGIN(slab_allocator)

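// Maps an allocation size to the index of the smallest fitting entry in
// kBlockSizes, or returns -1 if the size exceeds the largest block size.
// For example, size_to_index(100) == 7, and kBlockSizes[7] == 112.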
static int
size_to_index(size_t size)
{
	if (size <= 16)
		return 0;
	if (size <= 32)
		return 1 + (size - 16 - 1) / 8;
	if (size <= 128)
		return 3 + (size - 32 - 1) / 16;
	if (size <= 256)
		return 9 + (size - 128 - 1) / 32;
	if (size <= 512)
		return 13 + (size - 256 - 1) / 64;
	if (size <= 1024)
		return 17 + (size - 512 - 1) / 128;
	if (size <= 2048)
		return 21 + (size - 1024 - 1) / 256;
	if (size <= 8192)
		return 25 + (size - 2048 - 1) / 512;

	return -1;
}


void*
block_alloc(size_t size, size_t alignment, uint32 flags)
{
	if (alignment > kMinObjectAlignment) {
		// Make size >= alignment and a power of two. This is sufficient, since
		// all of our object caches with power-of-two sizes are aligned. We may
		// waste quite a bit of memory, but memalign() is very rarely used
		// in the kernel and always with power-of-two size == alignment anyway.
		ASSERT((alignment & (alignment - 1)) == 0);
		while (alignment < size)
			alignment <<= 1;
		size = alignment;
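		// E.g. a block_alloc(600, 256, ...) request becomes alignment == size
		// == 1024 and is served from the (size aligned) 1024 byte cache.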

		// If we're not using an object cache, make sure that the memory
		// manager knows it has to align the allocation.
		if (size > kBlockSizes[kNumBlockSizes - 1])
			flags |= CACHE_ALIGN_ON_SIZE;
	}

	// allocate from the respective object cache, if any
	int index = size_to_index(size);
	if (index >= 0)
		return object_cache_alloc(sBlockCaches[index], flags);

	// the allocation is too large for our object caches -- ask the memory
	// manager
	void* block;
	if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
		return NULL;

	return block;
}


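// Variant of block_alloc() for use during early boot, when the object caches
// may not have been created yet. Falls back to raw chunks from the
// MemoryManager, or, for small allocations, to the bootstrap memory above.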
void*
block_alloc_early(size_t size)
{
	int index = size_to_index(size);
	if (index >= 0 && sBlockCaches[index] != NULL)
		return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);

	if (size > SLAB_CHUNK_SIZE_SMALL) {
		// This is a sufficiently large allocation -- just ask the memory
		// manager directly.
		void* block;
		if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
			return NULL;

		return block;
	}

	// A small allocation, but no object cache yet. Use the bootstrap memory.
	// This allocation must never be freed!
	if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
		// We need more memory.
		void* block;
		if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
			return NULL;
		sBootStrapMemory = (addr_t)block;
		sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
		sUsedBootStrapMemory = 0;
	}

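	// Round the size up so that consecutive bootstrap allocations stay
	// aligned to sizeof(double).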
	size_t neededSize = ROUNDUP(size, sizeof(double));
	if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
		return NULL;
	void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
	sUsedBootStrapMemory += neededSize;

	return block;
}


void
block_free(void* block, uint32 flags)
{
	if (block == NULL)
		return;

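	// FreeRawOrReturnCache() frees raw (large) allocations itself and returns
	// NULL for them; for cache-backed blocks it returns the owning cache and
	// leaves freeing the object to us.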
	ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
	if (cache != NULL) {
		// a regular small allocation
		ASSERT(cache->object_size >= kBlockSizes[0]);
		ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
		ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
		object_cache_free(cache, block, flags);
	}
}


void
block_allocator_init_boot()
{
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		char name[32];
		snprintf(name, sizeof(name), "block allocator: %lu",
			kBlockSizes[index]);

		uint32 flags = CACHE_DURING_BOOT;
		size_t size = kBlockSizes[index];

		// align the power-of-two objects to their size
		size_t alignment = (size & (size - 1)) == 0 ? size : 0;

		// For the larger allocation sizes, disable the object depot, so we
		// don't keep lots of unused objects around.
		if (size > 2048)
			flags |= CACHE_NO_DEPOT;

		sBlockCaches[index] = create_object_cache_etc(name, size, alignment, 0,
			0, 0, flags, NULL, NULL, NULL, NULL);
		if (sBlockCaches[index] == NULL)
			panic("allocator: failed to init block cache");
	}
}


void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag), 0,
			0), 0);
	}
#endif
}


// #pragma mark - public API


#if USE_SLAB_ALLOCATOR_FOR_MALLOC

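// With USE_SLAB_ALLOCATOR_FOR_MALLOC set, the kernel's malloc()/free() family
// is implemented on top of the block allocator above.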

void*
memalign(size_t alignment, size_t size)
{
	return block_alloc(size, alignment, 0);
}


void*
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
}


void
free_etc(void* address, uint32 flags)
{
	block_free(address, flags & CACHE_ALLOC_FLAGS);
}


void*
malloc(size_t size)
{
	return block_alloc(size, 0, 0);
}


void
free(void* address)
{
	block_free(address, 0);
}


void*
realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		block_free(address, 0);
		return NULL;
	}

	if (address == NULL)
		return block_alloc(newSize, 0, 0);

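	// Look up the usable size of the existing allocation.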
	size_t oldSize;
	ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
	if (cache == NULL && oldSize == 0) {
		panic("realloc(): allocation %p not known", address);
		return NULL;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = block_alloc(newSize, 0, 0);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, std::min(oldSize, newSize));

	block_free(address, 0);

	return newBlock;
}


#endif	// USE_SLAB_ALLOCATOR_FOR_MALLOC


RANGE_MARKER_FUNCTION_END(slab_allocator)