/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include "slab_private.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <debug.h>
#include <heap.h>
#include <kernel.h> // for ROUNDUP
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "ObjectCache.h"
#include "MemoryManager.h"


//#define TEST_ALL_CACHES_DURING_BOOT

static const size_t kBlockSizes[] = {
	16, 24, 32, 48, 64, 80, 96, 112,
	128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1280, 1536, 1792,
	2048, 2560, 3072, 3584, 4096, 4608, 5120, 5632,
	6144, 6656, 7168, 7680, 8192,
	0
};

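// Number of entries in kBlockSizes, excluding the terminating 0.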
static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;

static object_cache* sBlockCaches[kNumBlockSizes];

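// Bootstrap memory used by block_alloc_early() for small allocations before
// the block caches have been created. Allocations from this range are never
// freed.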
static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;


RANGE_MARKER_FUNCTION_BEGIN(slab_allocator)


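// Maps an allocation size to the index of the smallest entry in kBlockSizes
// that can hold it. Returns -1 if the size exceeds the largest block size.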
static int
size_to_index(size_t size)
{
	if (size <= 16)
		return 0;
	if (size <= 32)
		return 1 + (size - 16 - 1) / 8;
	if (size <= 128)
		return 3 + (size - 32 - 1) / 16;
	if (size <= 256)
		return 9 + (size - 128 - 1) / 32;
	if (size <= 512)
		return 13 + (size - 256 - 1) / 64;
	if (size <= 1024)
		return 17 + (size - 512 - 1) / 128;
	if (size <= 2048)
		return 21 + (size - 1024 - 1) / 256;
	if (size <= 8192)
		return 25 + (size - 2048 - 1) / 512;

	return -1;
}


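// Allocates a block of the given size, honoring the given alignment. Small
// requests are served from one of the per-size object caches; anything larger
// than the largest block size is passed to the memory manager directly.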
static void*
block_alloc(size_t size, size_t alignment, uint32 flags)
{
	if (alignment > kMinObjectAlignment) {
		// Make size >= alignment and a power of two. This is sufficient, since
		// all of our object caches with power of two sizes are aligned. We may
		// waste quite a bit of memory, but memalign() is very rarely used
		// in the kernel and always with power of two size == alignment anyway.
		ASSERT((alignment & (alignment - 1)) == 0);
		while (alignment < size)
			alignment <<= 1;
		size = alignment;

		// If we're not using an object cache, make sure that the memory
		// manager knows it has to align the allocation.
		if (size > kBlockSizes[kNumBlockSizes - 1])
			flags |= CACHE_ALIGN_ON_SIZE;
	}

	// allocate from the respective object cache, if any
	int index = size_to_index(size);
	if (index >= 0)
		return object_cache_alloc(sBlockCaches[index], flags);

	// the allocation is too large for our object caches -- ask the memory
	// manager
	void* block;
	if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
		return NULL;

	return block;
}


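// Early-boot allocation path, used while the block caches may not exist yet.
// Small requests that no cache can serve come out of the bootstrap memory
// range; such allocations must never be freed.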
void*
block_alloc_early(size_t size)
{
	int index = size_to_index(size);
	if (index >= 0 && sBlockCaches[index] != NULL)
		return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);

	if (size > SLAB_CHUNK_SIZE_SMALL) {
		// This is a sufficiently large allocation -- just ask the memory
		// manager directly.
		void* block;
		if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
			return NULL;

		return block;
	}

	// A small allocation, but no object cache yet. Use the bootstrap memory.
	// This allocation must never be freed!
	if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
		// We need more memory.
		void* block;
		if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
			return NULL;
		sBootStrapMemory = (addr_t)block;
		sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
		sUsedBootStrapMemory = 0;
	}

	size_t neededSize = ROUNDUP(size, sizeof(double));
	if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
		return NULL;
	void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
	sUsedBootStrapMemory += neededSize;

	return block;
}


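// Frees a block allocated with block_alloc(). Raw allocations are returned to
// the memory manager; cache-backed ones go back to their object cache.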
static void
block_free(void* block, uint32 flags)
{
	if (block == NULL)
		return;

	ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
	if (cache != NULL) {
		// a regular small allocation
		ASSERT(cache->object_size >= kBlockSizes[0]);
		ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
		ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
		object_cache_free(cache, block, flags);
	}
}


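// Creates one object cache per entry in kBlockSizes. Called during boot; the
// caches are created with CACHE_DURING_BOOT.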
void
block_allocator_init_boot()
{
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		char name[32];
		snprintf(name, sizeof(name), "block allocator: %lu",
			kBlockSizes[index]);

		uint32 flags = CACHE_DURING_BOOT;
		size_t size = kBlockSizes[index];

		// align the power of two objects to their size
		size_t alignment = (size & (size - 1)) == 0 ? size : 0;

		// For the larger allocation sizes, disable the object depot, so we
		// don't keep lots of unused objects around.
		if (size > 2048)
			flags |= CACHE_NO_DEPOT;

		sBlockCaches[index] = create_object_cache_etc(name, size, alignment, 0,
			0, 0, flags, NULL, NULL, NULL, NULL);
		if (sBlockCaches[index] == NULL)
			panic("allocator: failed to init block cache");
	}
}


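// If TEST_ALL_CACHES_DURING_BOOT is defined, allocates and frees one object of
// every block size as a smoke test.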
void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag), 0,
			0), 0);
	}
#endif
}


// #pragma mark - public API


#if USE_SLAB_ALLOCATOR_FOR_MALLOC


void*
memalign(size_t alignment, size_t size)
{
	return block_alloc(size, alignment, 0);
}


void*
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
}


int
posix_memalign(void** _pointer, size_t alignment, size_t size)
{
	if (_pointer == NULL || alignment == 0
		|| (alignment & (alignment - 1)) != 0
		|| (alignment & (sizeof(void*) - 1)) != 0)
		return B_BAD_VALUE;

	void* pointer = block_alloc(size, alignment, 0);
	if (pointer == NULL)
		return B_NO_MEMORY;

	*_pointer = pointer;
	return 0;
}


void
free_etc(void* address, uint32 flags)
{
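	// With CACHE_DONT_LOCK_KERNEL_SPACE the caller cannot afford to lock the
	// kernel address space, so defer the actual free.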
	if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
		deferred_free(address);
		return;
	}

	block_free(address, flags & CACHE_ALLOC_FLAGS);
}


void*
malloc(size_t size)
{
	return block_alloc(size, 0, 0);
}


void
free(void* address)
{
	block_free(address, 0);
}


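// Resizes an allocation. A newSize of 0 frees the block; a NULL address is a
// plain allocation. Otherwise the contents are copied into a new block and
// the old one is freed.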
void*
realloc_etc(void* address, size_t newSize, uint32 flags)
{
	if (newSize == 0) {
		block_free(address, flags);
		return NULL;
	}

	if (address == NULL)
		return block_alloc(newSize, 0, flags);

	size_t oldSize;
	ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
	if (cache == NULL && oldSize == 0) {
		panic("block_realloc(): allocation %p not known", address);
		return NULL;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = block_alloc(newSize, 0, flags);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, std::min(oldSize, newSize));

	block_free(address, flags);

	return newBlock;
}


void*
realloc(void* address, size_t newSize)
{
	return realloc_etc(address, newSize, 0);
}


#endif	// USE_SLAB_ALLOCATOR_FOR_MALLOC


RANGE_MARKER_FUNCTION_END(slab_allocator)