1/*
2 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de.
4 * Distributed under the terms of the MIT License.
5 */
6
7
8#include <OS.h>
9
10#include <string.h>
11
12#include <algorithm>
13
14#include <kernel.h>
15#include <boot/stage2.h>
16#include <boot/platform.h>
17
18
// Size of one heap chunk used by kernel_args_malloc(); allocations are carved
// out of such chunks until one is exhausted.
static const size_t kChunkSize = 16 * B_PAGE_SIZE;

kernel_args gKernelArgs;
	// the arguments handed over to the kernel at boot
KMessage gBootVolume;
	// boot volume description passed to the kernel

// State of the simple bump allocator behind kernel_args_malloc():
static void* sFirstFree;
	// next free byte in the current chunk (NULL before the first chunk)
static void* sLast;
	// start of the most recent allocation -- the only one kernel_args_free()
	// can reclaim
static size_t sFree = kChunkSize;
	// bytes still free in the current chunk
27
28
29static status_t
30add_kernel_args_range(void* start, size_t size)
31{
32	return insert_address_range(gKernelArgs.kernel_args_range,
33		&gKernelArgs.num_kernel_args_ranges, MAX_KERNEL_ARGS_RANGE,
34		(addr_t)start, size);
35}
36
37
38// #pragma mark - addr_range utility functions
39
40
41static void
42remove_range_index(addr_range* ranges, uint32& numRanges, uint32 index)
43{
44	if (index + 1 == numRanges) {
45		// remove last range
46		numRanges--;
47		return;
48	}
49
50	memmove(&ranges[index], &ranges[index + 1],
51		sizeof(addr_range) * (numRanges - 1 - index));
52	numRanges--;
53}
54
55
/*!	Inserts the range [\a start, \a start + \a size), rounded outward to page
	boundaries, into the given addr_range array.
	Existing ranges that overlap or touch the new range are extended and
	merged, so that the array always contains as few ranges as possible.
	\return \c B_OK on success, or \c B_ENTRY_NOT_FOUND if a new array entry
		was needed but all \a maxRanges entries are already in use.
*/
extern "C" status_t
insert_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
	uint64 start, uint64 size)
{
	uint32 numRanges = *_numRanges;

	// round outward to whole pages
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	size = ROUNDUP(size, B_PAGE_SIZE);
	uint64 end = start + size;

	for (uint32 i = 0; i < numRanges; i++) {
		uint64 rangeStart = ranges[i].start;
		uint64 rangeEnd = rangeStart + ranges[i].size;

		if (end < rangeStart || start > rangeEnd) {
			// ranges don't intersect or touch each other
			continue;
		}
		if (start >= rangeStart && end <= rangeEnd) {
			// range is already completely covered
			return B_OK;
		}

		if (start < rangeStart) {
			// prepend to the existing range
			ranges[i].start = start;
			ranges[i].size += rangeStart - start;
		}
		// note: re-read ranges[i].start, it may have changed above
		if (end > ranges[i].start + ranges[i].size) {
			// append to the existing range
			ranges[i].size = end - ranges[i].start;
		}

		// join ranges if possible

		for (uint32 j = 0; j < numRanges; j++) {
			if (i == j)
				continue;

			// re-read the extended range each iteration -- it grows as other
			// ranges are merged into it
			rangeStart = ranges[i].start;
			rangeEnd = rangeStart + ranges[i].size;
			uint64 joinStart = ranges[j].start;
			uint64 joinEnd = joinStart + ranges[j].size;

			if (rangeStart <= joinEnd && joinEnd <= rangeEnd) {
				// join range that used to be before the current one, or
				// the one that's now entirely included by the current one
				if (joinStart < rangeStart) {
					ranges[i].size += rangeStart - joinStart;
					ranges[i].start = joinStart;
				}

				// j-- compensates for the removal shifting later entries down
				remove_range_index(ranges, numRanges, j--);
			} else if (joinStart <= rangeEnd && joinEnd > rangeEnd) {
				// join range that used to be after the current one
				ranges[i].size += joinEnd - rangeEnd;

				remove_range_index(ranges, numRanges, j--);
			}
		}

		*_numRanges = numRanges;
		return B_OK;
	}

	// no range matched, we need to create a new one

	if (numRanges >= maxRanges)
		return B_ENTRY_NOT_FOUND;

	ranges[numRanges].start = (uint64)start;
	ranges[numRanges].size = size;
	(*_numRanges)++;

	return B_OK;
}
139
140
141extern "C" status_t
142remove_address_range(addr_range* ranges, uint32* _numRanges, uint32 maxRanges,
143	uint64 start, uint64 size)
144{
145	uint32 numRanges = *_numRanges;
146
147	uint64 end = ROUNDUP(start + size, B_PAGE_SIZE);
148	start = ROUNDDOWN(start, B_PAGE_SIZE);
149
150	for (uint32 i = 0; i < numRanges; i++) {
151		uint64 rangeStart = ranges[i].start;
152		uint64 rangeEnd = rangeStart + ranges[i].size;
153
154		if (start <= rangeStart) {
155			if (end <= rangeStart) {
156				// no intersection
157			} else if (end >= rangeEnd) {
158				// remove the complete range
159				remove_range_index(ranges, numRanges, i);
160				i--;
161			} else {
162				// remove the head of the range
163				ranges[i].start = end;
164				ranges[i].size = rangeEnd - end;
165			}
166		} else if (end >= rangeEnd) {
167			if (start < rangeEnd) {
168				// remove the tail
169				ranges[i].size = start - rangeStart;
170			}	// else: no intersection
171		} else {
172			// rangeStart < start < end < rangeEnd
173			// The ugly case: We have to remove something from the middle of
174			// the range. We keep the head of the range and insert its tail
175			// as a new range.
176			ranges[i].size = start - rangeStart;
177			return insert_address_range(ranges, _numRanges, maxRanges, end,
178				rangeEnd - end);
179		}
180	}
181
182	*_numRanges = numRanges;
183	return B_OK;
184}
185
186
187bool
188get_free_address_range(addr_range* ranges, uint32 numRanges, uint64 base,
189	uint64 size, uint64* _rangeBase)
190{
191	uint64 end = base + size - 1;
192	if (end < base)
193		return false;
194
195	// Note: We don't assume that the ranges are sorted, so we can't do this
196	// in a simple loop. Instead we restart the loop whenever our range
197	// intersects with an existing one.
198
199	for (uint32 i = 0; i < numRanges;) {
200		uint64 rangeStart = ranges[i].start;
201		uint64 rangeEnd = ranges[i].start + ranges[i].size - 1;
202
203		if (base <= rangeEnd && rangeStart <= end) {
204			base = rangeEnd + 1;
205			end = rangeEnd + size;
206			if (base == 0 || end < base)
207				return false;
208
209			i = 0;
210			continue;
211		}
212
213		i++;
214	}
215
216	*_rangeBase = base;
217	return true;
218}
219
220
221bool
222is_address_range_covered(addr_range* ranges, uint32 numRanges, uint64 base,
223	uint64 size)
224{
225	// Note: We don't assume that the ranges are sorted, so we can't do this
226	// in a simple loop. Instead we restart the loop whenever the start of the
227	// given range intersects with an existing one.
228
229	for (uint32 i = 0; i < numRanges;) {
230		uint64 rangeStart = ranges[i].start;
231		uint64 rangeSize = ranges[i].size;
232
233		if (rangeStart <= base && rangeSize > base - rangeStart) {
234			uint64 intersect = std::min(rangeStart + rangeSize - base, size);
235			base += intersect;
236			size -= intersect;
237			if (size == 0)
238				return true;
239
240			i = 0;
241			continue;
242		}
243
244		i++;
245	}
246
247	return false;
248}
249
250
251extern "C" uint64
252total_address_ranges_size(addr_range* ranges, uint32 numRanges)
253{
254	uint64 total = 0;
255	for (uint32 i = 0; i < numRanges; i++)
256		total += ranges[i].size;
257	return total;
258}
259
260
261void
262sort_address_ranges(addr_range* ranges, uint32 numRanges)
263{
264	// TODO: This is a pretty sucky bubble sort implementation!
265	bool done;
266
267	do {
268		done = true;
269		for (uint32 i = 1; i < numRanges; i++) {
270			if (ranges[i].start < ranges[i - 1].start) {
271				done = false;
272				addr_range tempRange;
273				memcpy(&tempRange, &ranges[i], sizeof(addr_range));
274				memcpy(&ranges[i], &ranges[i - 1], sizeof(addr_range));
275				memcpy(&ranges[i - 1], &tempRange, sizeof(addr_range));
276			}
277		}
278	} while (!done);
279}
280
281
282// #pragma mark - kernel args range functions
283
284
285status_t
286insert_physical_memory_range(uint64 start, uint64 size)
287{
288	return insert_address_range(gKernelArgs.physical_memory_range,
289		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
290		start, size);
291}
292
293
294status_t
295remove_physical_memory_range(uint64 start, uint64 size)
296{
297	return remove_address_range(gKernelArgs.physical_memory_range,
298		&gKernelArgs.num_physical_memory_ranges, MAX_PHYSICAL_MEMORY_RANGE,
299		start, size);
300}
301
302
303uint64
304total_physical_memory()
305{
306	return total_address_ranges_size(gKernelArgs.physical_memory_range,
307		gKernelArgs.num_physical_memory_ranges);
308}
309
310
311status_t
312insert_physical_allocated_range(uint64 start, uint64 size)
313{
314	return insert_address_range(gKernelArgs.physical_allocated_range,
315		&gKernelArgs.num_physical_allocated_ranges,
316		MAX_PHYSICAL_ALLOCATED_RANGE, start, size);
317}
318
319
320status_t
321insert_virtual_allocated_range(uint64 start, uint64 size)
322{
323	return insert_address_range(gKernelArgs.virtual_allocated_range,
324		&gKernelArgs.num_virtual_allocated_ranges, MAX_VIRTUAL_ALLOCATED_RANGE,
325		start, size);
326}
327
328
329#if B_HAIKU_PHYSICAL_BITS > 32
330
331void
332ignore_physical_memory_ranges_beyond_4gb()
333{
334	// sort
335	sort_address_ranges(gKernelArgs.physical_memory_range,
336		gKernelArgs.num_physical_memory_ranges);
337
338	static const uint64 kLimit = (uint64)1 << 32;
339
340	// remove everything past 4 GB
341	for (uint32 i = gKernelArgs.num_physical_memory_ranges; i > 0; i--) {
342		addr_range& range = gKernelArgs.physical_memory_range[i - 1];
343		if (range.start >= kLimit) {
344			// the complete range is beyond the limit
345			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
346				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", range.start,
347				range.start + range.size);
348			gKernelArgs.ignored_physical_memory += range.size;
349			gKernelArgs.num_physical_memory_ranges = i - 1;
350			continue;
351		}
352
353		if (kLimit - range.start < range.size) {
354			// the range is partially beyond the limit
355			dprintf("ignore_physical_memory_ranges_beyond_4gb(): ignoring "
356				"range: %#" B_PRIx64 " - %#" B_PRIx64 "\n", kLimit,
357				range.start + range.size);
358			gKernelArgs.ignored_physical_memory
359				+= range.size - (kLimit - range.start);
360		}
361
362		break;
363	}
364}
365
366#endif	// B_HAIKU_PHYSICAL_BITS > 32
367
368
369// #pragma mark - kernel_args allocations
370
371
372/*!	Convenience function that copies strdup() functions for the
373	kernel args heap.
374*/
375char*
376kernel_args_strdup(const char* string)
377{
378	if (string == NULL || string[0] == '\0')
379		return NULL;
380
381	size_t length = strlen(string) + 1;
382
383	char* target = (char*)kernel_args_malloc(length);
384	if (target == NULL)
385		return NULL;
386
387	memcpy(target, string, length);
388	return target;
389}
390
391
/*!	This function can be used to allocate (optionally aligned) memory that
	is going to be passed over to the kernel. For example, the preloaded_image
	structures are allocated this way.
	The boot loader heap doesn't make it into the kernel!
	\param size number of bytes to allocate
	\param alignment required alignment of the returned address; must be a
		power of two (NOTE(review): presumably defaults to 1 via the header
		declaration -- confirm)
	\return the allocated, aligned address, or \c NULL on failure
*/
void*
kernel_args_malloc(size_t size, uint8 alignment)
{
	//dprintf("kernel_args_malloc(): %ld bytes (%ld bytes left)\n", size, sFree);

	#define ALIGN(addr, align) (((addr) + align - 1) & ~(align - 1))
	// worst case: enough room for the requested size at any alignment offset
	size_t alignedSize = size + alignment - 1;

	if (sFirstFree != NULL && alignedSize <= sFree) {
		// there is enough space in the current buffer
		void* address = (void*)ALIGN((addr_t)sFirstFree, alignment);
		sFirstFree = (void*)((addr_t)sFirstFree + alignedSize);
		sLast = address;
		sFree -= alignedSize;

		return address;
	}

	if (alignedSize > kChunkSize / 2 && sFree < alignedSize) {
		// the block is so large, we'll allocate a new block for it
		// (note: sLast is not updated here, so such a block can never be
		// reclaimed by kernel_args_free())
		void* block = NULL;
		if (platform_allocate_region(&block, alignedSize,
				B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
			return NULL;
		}

		// Translate the address if needed on the current platform
		addr_t translated_block;
		platform_bootloader_address_to_kernel_address(block, &translated_block);
		// NOTE(review): only `size` bytes are registered here although up to
		// `alignedSize` bytes may be used after alignment -- confirm
		if (add_kernel_args_range((void *)translated_block, size) != B_OK)
			panic("kernel_args max range too low!\n");

		return (void*)ALIGN((addr_t)block, alignment);
	}

	// just allocate a new block and "close" the old one
	// (whatever space was left in it is abandoned)
	void* block = NULL;
	if (platform_allocate_region(&block, kChunkSize, B_READ_AREA | B_WRITE_AREA,
			false) != B_OK) {
		return NULL;
	}

	sFirstFree = (void*)((addr_t)block + alignedSize);
	sLast = block;
	sFree = kChunkSize - alignedSize;

	// Translate the address if needed on the current platform
	addr_t translated_block;
	platform_bootloader_address_to_kernel_address(block, &translated_block);
	if (add_kernel_args_range((void *)translated_block, kChunkSize) != B_OK)
		panic("kernel_args max range too low!\n");

	return (void*)ALIGN((addr_t)block, alignment);
}
451
452
453/*!	This function frees a block allocated via kernel_args_malloc().
454	It's very simple; it can only free the last allocation. It's
455	enough for its current usage in the boot loader, though.
456*/
457void
458kernel_args_free(void* block)
459{
460	if (sLast != block) {
461		// sorry, we're dumb
462		return;
463	}
464
465	sFree = (addr_t)sFirstFree - (addr_t)sLast;
466	sFirstFree = block;
467	sLast = NULL;
468}
469
470